/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <[email protected]>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>

/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
static struct page *__xip_sparse_page;

static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page) {
			static DEFINE_SPINLOCK(xip_alloc_lock);
			spin_lock(&xip_alloc_lock);
			if (!__xip_sparse_page)
				__xip_sparse_page = page;
			else
				__free_page(page);
			spin_unlock(&xip_alloc_lock);
		}
	}
	return __xip_sparse_page;
}

/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_page() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
static void
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    loff_t *ppos,
		    read_descriptor_t *desc,
		    read_actor_t actor)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize;

	BUG_ON(!mapping->a_ops->get_xip_page);

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	for (;;) {
		struct page *page;
		unsigned long nr, ret;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;

		page = mapping->a_ops->get_xip_page(mapping,
			index*(PAGE_SIZE/512), 0);
		if (!page)
			goto no_xip_page;
		if (unlikely(IS_ERR(page))) {
			if (PTR_ERR(page) == -ENODATA) {
				/* sparse */
				page = ZERO_PAGE(0);
			} else {
				desc->error = PTR_ERR(page);
				goto out;
			}
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * Ok, we have the page, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		if (ret == nr && desc->count)
			continue;
		goto out;

no_xip_page:
		/* Did not get the page.  Report it */
		desc->error = -EIO;
		goto out;
	}

out:
	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	if (filp)
		file_accessed(filp);
}

ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	read_descriptor_t desc;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	desc.written = 0;
	desc.arg.buf = buf;
	desc.count = len;
	desc.error = 0;

	do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
			    ppos, &desc, file_read_actor);

	if (desc.written)
		return desc.written;
	else
		return desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_read);
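/*
 * Illustrative sketch, not part of the original file: the read path above
 * (and the fault/write paths below) hand get_xip_page() a block number in
 * 512-byte sectors (index * (PAGE_SIZE/512)) plus a "create" flag, and
 * expect either a page backed by directly addressable storage, or
 * ERR_PTR(-ENODATA) for a hole when create == 0.  A hypothetical
 * implementation could look roughly like this; example_map_sector() is a
 * made-up helper standing in for real filesystem block-lookup code.
 */
static struct page *
example_get_xip_page(struct address_space *mapping, sector_t sector,
		     int create)
{
	unsigned long pfn;
	int err;

	/* hypothetical helper: resolve the sector to a directly
	 * addressable page frame, allocating a block when create != 0 */
	err = example_map_sector(mapping->host, sector, create, &pfn);
	if (err == -ENOENT && !create)
		/* sparse block: callers substitute a zeroed page */
		return ERR_PTR(-ENODATA);
	if (err)
		return ERR_PTR(err);

	return pfn_to_page(pfn);
}
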
/*
 * __xip_unmap is invoked from xip_file_fault and
 * __xip_file_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap (struct address_space * mapping,
		     unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;

	page = __xip_sparse_page;
	if (!page)
		return;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush(vma, address, pte);
			page_remove_rmap(page, vma);
			dec_mm_counter(mm, file_rss);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
}

/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in place
 */
static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
	struct file *file = area->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	pgoff_t size;

	/* XXX: are VM_FAULT_ codes OK? */

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	page = mapping->a_ops->get_xip_page(mapping,
					vmf->pgoff*(PAGE_SIZE/512), 0);
	if (!IS_ERR(page))
		goto out;
	if (PTR_ERR(page) != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (area->vm_flags & (VM_SHARED| VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		/* maybe shared writable, allocate new block */
		page = mapping->a_ops->get_xip_page(mapping,
					vmf->pgoff*(PAGE_SIZE/512), 1);
		if (IS_ERR(page))
			return VM_FAULT_SIGBUS;
		/* unmap page at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);
	} else {
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			return VM_FAULT_OOM;
	}

out:
	page_cache_get(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct xip_file_vm_ops = {
	.fault	= xip_file_fault,
};

int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_page);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
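/*
 * Illustrative sketch, not part of the original file: a filesystem built
 * with XIP support would typically point its file_operations at the
 * helpers exported from this file, roughly as ext2 does for its XIP mode.
 * The "example_" name is an assumption.
 */
static const struct file_operations example_xip_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= xip_file_read,
	.write		= xip_file_write,	/* defined later in this file */
	.mmap		= xip_file_mmap,
	.open		= generic_file_open,
};
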
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		  size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space * mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	struct page *page;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_page);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		char *kaddr;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		page = a_ops->get_xip_page(mapping,
					   index*(PAGE_SIZE/512), 0);
		if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
			/* we allocate a new page and unmap it */
			page = a_ops->get_xip_page(mapping,
						   index*(PAGE_SIZE/512), 1);
			if (!IS_ERR(page))
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (IS_ERR(page)) {
			status = PTR_ERR(page);
			break;
		}

		fault_in_pages_readable(buf, bytes);
		kaddr = kmap_atomic(page, KM_USER0);
		copied = bytes -
			__copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(page);

		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}

ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = remove_suid(filp->f_path.dentry);
	if (ret)
		goto out_backing;

	file_update_time(filp);

	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);

/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page, but uses get_xip_page
 * to get the page instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	unsigned length;
	struct page *page;

	BUG_ON(!mapping->a_ops->get_xip_page);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	page = mapping->a_ops->get_xip_page(mapping,
					    index*(PAGE_SIZE/512), 0);
	if (!page)
		return -ENOMEM;
	if (unlikely(IS_ERR(page))) {
		if (PTR_ERR(page) == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return PTR_ERR(page);
	}
	zero_user(page, offset, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
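/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * truncate path would normally zero the tail of the last block through
 * xip_truncate_page() before releasing blocks beyond the new size.
 * example_free_blocks() is a made-up placeholder for that
 * filesystem-specific step.
 */
static int example_xip_truncate(struct inode *inode, loff_t newsize)
{
	int err;

	err = xip_truncate_page(inode->i_mapping, newsize);
	if (err)
		return err;

	example_free_blocks(inode, newsize);	/* hypothetical helper */

	i_size_write(inode, newsize);
	mark_inode_dirty(inode);
	return 0;
}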