/*
 * linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <[email protected]>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
#include "filemap.h"

/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
static struct page *__xip_sparse_page;

static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		unsigned long zeroes = get_zeroed_page(GFP_HIGHUSER);
		if (zeroes) {
			static DEFINE_SPINLOCK(xip_alloc_lock);
			spin_lock(&xip_alloc_lock);
			if (!__xip_sparse_page)
				__xip_sparse_page = virt_to_page(zeroes);
			else
				free_page(zeroes);
			spin_unlock(&xip_alloc_lock);
		}
	}
	return __xip_sparse_page;
}

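/*
 * Note on the interface used throughout this file: all routines below rely
 * on the address_space operation ->get_xip_page(mapping, sector, create).
 * The prototype shown here is an assumption for illustration only; the
 * authoritative definition lives in struct address_space_operations in
 * <linux/fs.h>:
 *
 *	struct page *(*get_xip_page)(struct address_space *, sector_t, int);
 *
 * It returns the page backing the given 512-byte sector, or an ERR_PTR()
 * such as -ENODATA for a hole when create == 0; with create != 0 the
 * filesystem is asked to allocate backing storage.  The expression
 * "index*(PAGE_SIZE/512)" used by the callers below converts a page index
 * into that sector unit.
 */
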
/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_page() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
static void
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    loff_t *ppos,
		    read_descriptor_t *desc,
		    read_actor_t actor)
{
	struct inode *inode = mapping->host;
	unsigned long index, end_index, offset;
	loff_t isize;

	BUG_ON(!mapping->a_ops->get_xip_page);

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	for (;;) {
		struct page *page;
		unsigned long nr, ret;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;

		page = mapping->a_ops->get_xip_page(mapping,
			index*(PAGE_SIZE/512), 0);
		if (!page)
			goto no_xip_page;
		if (unlikely(IS_ERR(page))) {
			if (PTR_ERR(page) == -ENODATA) {
				/* sparse */
				page = ZERO_PAGE(0);
			} else {
				desc->error = PTR_ERR(page);
				goto out;
			}
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * Ok, we have the page, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		if (ret == nr && desc->count)
			continue;
		goto out;

no_xip_page:
		/* Did not get the page.  Report it */
		desc->error = -EIO;
		goto out;
	}

out:
	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	if (filp)
		file_accessed(filp);
}

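/*
 * xip_file_read() is meant to be used as a filesystem's ->read file
 * operation for execute in place files: it validates the user buffer and
 * drives do_xip_mapping_read() with file_read_actor to copy the data out.
 */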
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	read_descriptor_t desc;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	desc.written = 0;
	desc.arg.buf = buf;
	desc.count = len;
	desc.error = 0;

	do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
			    ppos, &desc, file_read_actor);

	if (desc.written)
		return desc.written;
	else
		return desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_read);

/*
 * __xip_unmap is invoked from xip_file_fault and
 * __xip_file_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap (struct address_space * mapping,
		     unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;

	page = __xip_sparse_page;
	if (!page)
		return;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush(vma, address, pte);
			page_remove_rmap(page, vma);
			dec_mm_counter(mm, file_rss);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
}

/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in place
 */
static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
	struct file *file = area->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	pgoff_t size;

	/* XXX: are VM_FAULT_ codes OK? */

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	page = mapping->a_ops->get_xip_page(mapping,
					vmf->pgoff*(PAGE_SIZE/512), 0);
	if (!IS_ERR(page))
		goto out;
	if (PTR_ERR(page) != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (area->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		/* maybe shared writable, allocate new block */
		page = mapping->a_ops->get_xip_page(mapping,
					vmf->pgoff*(PAGE_SIZE/512), 1);
		if (IS_ERR(page))
			return VM_FAULT_SIGBUS;
		/* unmap page at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);
	} else {
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			return VM_FAULT_OOM;
	}

out:
	page_cache_get(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct xip_file_vm_ops = {
	.fault	= xip_file_fault,
};

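/*
 * Install the execute-in-place vm_operations for a file mapping.  The
 * backing address_space must provide ->get_xip_page; page faults on the
 * resulting vma are then served by xip_file_fault() above.
 */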
int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_page);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);

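/*
 * Copy data from user space directly into the execute-in-place backing
 * store.  Blocks are obtained via ->get_xip_page; a hole is filled by
 * allocating a new block (create == 1) and unmapping the shared sparse
 * page from all other vmas first.  The caller must hold i_mutex.
 */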
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		  size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space * mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	struct page *page;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_page);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		fault_in_pages_readable(buf, bytes);

		page = a_ops->get_xip_page(mapping,
					   index*(PAGE_SIZE/512), 0);
		if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
			/* sparse block: we allocate a new page and unmap it */
			page = a_ops->get_xip_page(mapping,
						   index*(PAGE_SIZE/512), 1);
			if (!IS_ERR(page))
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (IS_ERR(page)) {
			status = PTR_ERR(page);
			break;
		}

		copied = filemap_copy_from_user(page, offset, buf, bytes);
		flush_dcache_page(page);
		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}

ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = remove_suid(filp->f_path.dentry);
	if (ret)
		goto out_backing;

	file_update_time(filp);

	ret = __xip_file_write (filp, buf, count, pos, ppos);

out_backing:
	current->backing_dev_info = NULL;
out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);

/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page but uses get_xip_page
 * to get the page instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	unsigned length;
	struct page *page;

	BUG_ON(!mapping->a_ops->get_xip_page);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	page = mapping->a_ops->get_xip_page(mapping,
					    index*(PAGE_SIZE/512), 0);
	if (!page)
		return -ENOMEM;
	if (unlikely(IS_ERR(page))) {
		if (PTR_ERR(page) == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return PTR_ERR(page);
	}
	zero_user_page(page, offset, length, KM_USER0);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
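
/*
 * Usage sketch (illustrative only; the ext2_* name below is hypothetical
 * and not defined in this file): a filesystem whose address_space
 * operations implement ->get_xip_page can route regular file I/O and mmap
 * through the helpers exported above, roughly like so:
 *
 *	const struct file_operations ext2_xip_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read		= xip_file_read,
 *		.write		= xip_file_write,
 *		.mmap		= xip_file_mmap,
 *		.open		= generic_file_open,
 *	};
 *
 * xip_truncate_page() is then called from the filesystem's truncate path
 * to zero the tail of a partially truncated block.
 */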