/*
 * linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <[email protected]>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

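/*
 * Everything in this file funnels through the filesystem-supplied
 * mapping->a_ops->get_xip_mem() hook.  A rough sketch of its shape, as
 * inferred from the call sites below (not copied from the headers):
 *
 *	int (*get_xip_mem)(struct address_space *mapping, pgoff_t pgoff,
 *			   int create, void **kmem, unsigned long *pfn);
 *
 * A return of 0 means *kmem and *pfn were filled with the kernel address
 * and pfn of the block backing pgoff; -ENODATA means a hole (when create
 * is non-zero the filesystem may allocate a block instead); any other
 * negative value is an error.
 */
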
/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO;
static struct page *__xip_sparse_page;

/* called under xip_sparse_mutex */
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page)
			__xip_sparse_page = page;
	}
	return __xip_sparse_page;
}

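/*
 * How the sparse page is kept consistent, as far as this file shows:
 * __xip_unmap() samples xip_sparse_seq before walking the vmas, while the
 * fault path that maps the sparse zero page read-only takes
 * xip_sparse_mutex and wraps its work in write_seqcount_begin/end.  If
 * __xip_unmap() sees the sequence count change under it, it retries once
 * while holding the mutex, so a sparse page being mapped in concurrently
 * cannot be missed.
 */
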
/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf,
		    size_t len,
		    loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;
	size_t copied = 0, error = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;
		if (nr > len - copied)
			nr = len - copied;

		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA) {
				/* sparse */
				zero = 1;
			} else
				goto out;
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;

		/*
		 * Ok, we have the mem, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		if (!zero)
			left = __copy_to_user(buf+copied, xip_mem+offset, nr);
		else
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}

		copied += (nr - left);
		offset += (nr - left);
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);

out:
	*ppos = pos + copied;
	if (filp)
		file_accessed(filp);

	return (copied ? copied : error);
}

ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
			    buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);

/*
 * __xip_unmap is invoked from xip_file_fault and
 * __xip_file_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap (struct address_space * mapping,
		     unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned count;
	int locked = 0;

	count = read_seqcount_begin(&xip_sparse_seq);

	page = __xip_sparse_page;
	if (!page)
		return;

retry:
	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl, 1);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush_notify(vma, address, pte);
			page_remove_rmap(page);
			dec_mm_counter(mm, MM_FILEPAGES);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	mutex_unlock(&mapping->i_mmap_mutex);

	if (locked) {
		mutex_unlock(&xip_sparse_mutex);
	} else if (read_seqcount_retry(&xip_sparse_seq, count)) {
		mutex_lock(&xip_sparse_mutex);
		locked = 1;
		goto retry;
	}
}

/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in place
 */
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;

	/* XXX: are VM_FAULT_ codes OK? */
again:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;

		/* maybe shared writable, allocate new block */
		mutex_lock(&xip_sparse_mutex);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		mutex_unlock(&xip_sparse_mutex);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);

found:
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
							xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		/*
		 * err == -EBUSY is fine, we've raced against another thread
		 * that faulted-in the same page
		 */
		if (err != -EBUSY)
			BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		int err, ret = VM_FAULT_OOM;

		mutex_lock(&xip_sparse_mutex);
		write_seqcount_begin(&xip_sparse_seq);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(!error)) {
			write_seqcount_end(&xip_sparse_seq);
			mutex_unlock(&xip_sparse_mutex);
			goto again;
		}
		if (error != -ENODATA)
			goto out;
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			goto out;
		err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
							page);
		if (err == -ENOMEM)
			goto out;

		ret = VM_FAULT_NOPAGE;
	out:
		write_seqcount_end(&xip_sparse_seq);
		mutex_unlock(&xip_sparse_mutex);

		return ret;
	}
}

static const struct vm_operations_struct xip_file_vm_ops = {
	.fault	= xip_file_fault,
};

int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);

static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		  size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space * mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* we allocate a new page and unmap it */
			mutex_lock(&xip_sparse_mutex);
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			mutex_unlock(&xip_sparse_mutex);
			if (!status)
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (status)
			break;

		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);

		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}

ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	file_update_time(filp);

	ret = __xip_file_write (filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);

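/*
 * The exported helpers above are meant to be plugged into an XIP-capable
 * filesystem's file_operations.  A minimal sketch of such wiring follows;
 * the "example_xip_file_operations" name is illustrative and not part of
 * this file (in-tree, ext2's CONFIG_EXT2_FS_XIP support is one user of
 * these helpers):
 *
 *	const struct file_operations example_xip_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read		= xip_file_read,
 *		.write		= xip_file_write,
 *		.mmap		= xip_file_mmap,
 *	};
 *
 * The filesystem must also provide get_xip_mem() in its
 * address_space_operations for any of these to work.
 */
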
/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page but uses get_xip_mem
 * to get the page instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;
	unsigned long xip_pfn;
	int err;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return err;
	}
	memset(xip_mem + offset, 0, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
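
/*
 * A hedged usage sketch for xip_truncate_page(): a filesystem would call it
 * from its truncate/setattr path to zero the tail of the last block before
 * shrinking i_size.  The names below are illustrative, not from this file:
 *
 *	static int example_xip_setsize(struct inode *inode, loff_t newsize)
 *	{
 *		int error;
 *
 *		error = xip_truncate_page(inode->i_mapping, newsize);
 *		if (error)
 *			return error;
 *		truncate_setsize(inode, newsize);
 *		(filesystem-specific block truncation would follow here)
 *		return 0;
 *	}
 */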