/* file-nommu.c: no-MMU version of ramfs
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/mman.h>

#include <asm/uaccess.h>
#include "internal.h"

static int ramfs_nommu_setattr(struct dentry *, struct iattr *);

struct address_space_operations ramfs_aops = {
	.readpage		= simple_readpage,
	.prepare_write		= simple_prepare_write,
	.commit_write		= simple_commit_write
};

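/* read, write and llseek use the generic page cache routines; the
 * nommu-specific handling is confined to ramfs_nommu_mmap() and
 * ramfs_nommu_get_unmapped_area() below */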
const struct file_operations ramfs_file_operations = {
	.mmap			= ramfs_nommu_mmap,
	.get_unmapped_area	= ramfs_nommu_get_unmapped_area,
	.read			= generic_file_read,
	.write			= generic_file_write,
	.fsync			= simple_sync_file,
	.sendfile		= generic_file_sendfile,
	.llseek			= generic_file_llseek,
};

struct inode_operations ramfs_file_inode_operations = {
	.setattr		= ramfs_nommu_setattr,
	.getattr		= simple_getattr,
};

/*****************************************************************************/
/*
 * add a contiguous set of pages into a ramfs inode when it's truncated from
 * size 0 on the assumption that it's going to be used for an mmap of shared
 * memory
 */
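/* The expected userspace pattern is, roughly (an illustrative sketch only -
 * the path and size are made up):
 *
 *	fd = open("/mnt/ramfs/buffer", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 64 * 1024);
 *	base = mmap(NULL, 64 * 1024, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * i.e. the file is grown from zero in a single step and then mapped shared,
 * so all of its backing store is allocated contiguously up front.
 */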
static int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
{
	struct pagevec lru_pvec;
	unsigned long npages, xpages, loop, limit;
	struct page *pages;
	unsigned order;
	void *data;
	int ret;

	/* make various checks */
	order = get_order(newsize);
	if (unlikely(order >= MAX_ORDER))
		goto too_big;

	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && newsize > limit)
		goto fsize_exceeded;

	if (newsize > inode->i_sb->s_maxbytes)
		goto too_big;

	i_size_write(inode, newsize);

	/* allocate enough contiguous pages to be able to satisfy the
	 * request */
	pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
	if (!pages)
		return -ENOMEM;

	/* split the high-order page into an array of single pages */
	xpages = 1UL << order;
	npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;

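	/* split_page() turns the single high-order block into 1 << order
	 * individually refcounted order-0 pages, so the surplus beyond
	 * npages can be handed straight back to the allocator; e.g. a
	 * three-page file needs an order-2 (four page) allocation and one
	 * page is trimmed off again below */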
	split_page(pages, order);

	/* trim off any pages we don't actually require */
	for (loop = npages; loop < xpages; loop++)
		__free_page(pages + loop);

	/* clear the memory we allocated */
	newsize = PAGE_SIZE * npages;
	data = page_address(pages);
	memset(data, 0, newsize);

	/* attach all the pages to the inode's address space */
	pagevec_init(&lru_pvec, 0);
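	/* batch the pages through a pagevec so they go onto the LRU a group
	 * at a time rather than taking the zone lock once per page */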
	for (loop = 0; loop < npages; loop++) {
		struct page *page = pages + loop;

		ret = add_to_page_cache(page, inode->i_mapping, loop, GFP_KERNEL);
		if (ret < 0)
			goto add_error;

		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);

		unlock_page(page);
	}

	pagevec_lru_add(&lru_pvec);
	return 0;

 fsize_exceeded:
	send_sig(SIGXFSZ, current, 0);
 too_big:
	return -EFBIG;

 add_error:
	page_cache_release(pages + loop);
	for (loop++; loop < npages; loop++)
		__free_page(pages + loop);
	return ret;
}

/*****************************************************************************/
/*
 * check that file shrinkage doesn't leave any VMAs dangling in midair
 */
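/* (a shared mapping on a nommu kernel points straight at the backing pages
 * in the page cache; truncated pages can't be faulted back in later, so any
 * shrinkage that would cut pages out from under such a mapping must be
 * refused)
 */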
static int ramfs_nommu_check_mappings(struct inode *inode,
				      size_t newsize, size_t size)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;

	/* search for VMAs that fall within the dead zone */
	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
			      newsize >> PAGE_SHIFT,
			      (size + PAGE_SIZE - 1) >> PAGE_SHIFT
			      ) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED)
			return -ETXTBSY; /* not quite true, but near enough */
	}

	return 0;
}

/*****************************************************************************/
/*
 * resize a ramfs inode - either expand it ready for a shared mmap or shrink
 * it, refusing any shrinkage that would leave a shared mapping dangling
 */
static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
{
	int ret;

	/* assume a truncate from zero size is going to be for the purposes of
	 * shared mmap */
	if (size == 0) {
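		/* ramfs_nommu_expand_for_mapping() takes a size_t, so refuse
		 * anything that wouldn't fit in one on a 32-bit machine */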
		if (unlikely(newsize >> 32))
			return -EFBIG;

		return ramfs_nommu_expand_for_mapping(inode, newsize);
	}

	/* check that a decrease in size doesn't cut off any shared mappings */
	if (newsize < size) {
		ret = ramfs_nommu_check_mappings(inode, newsize, size);
		if (ret < 0)
			return ret;
	}

	/* truncate to the new size, not the old one */
	ret = vmtruncate(inode, newsize);

	return ret;
}

/*****************************************************************************/
/*
 * handle a change of attributes
 * - we're specifically interested in a change of size
 */
static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
{
	struct inode *inode = dentry->d_inode;
	unsigned int old_ia_valid = ia->ia_valid;
	int ret = 0;

	/* by providing our own setattr() method, we skip this quotaism */
	if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
	    (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid))
		ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0;

	/* pick out size-changing events */
	if (ia->ia_valid & ATTR_SIZE) {
		loff_t size = i_size_read(inode);
		if (ia->ia_size != size) {
			ret = ramfs_nommu_resize(inode, ia->ia_size, size);
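			/* if the resize failed, or size was the only
			 * attribute being changed, there's nothing further
			 * for inode_setattr() to do */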
			if (ret < 0 || ia->ia_valid == ATTR_SIZE)
				goto out;
		} else {
			/* we skipped the truncate but must still update
			 * timestamps
			 */
			ia->ia_valid |= ATTR_MTIME|ATTR_CTIME;
		}
	}

	ret = inode_setattr(inode, ia);
 out:
	ia->ia_valid = old_ia_valid;
	return ret;
}

/*****************************************************************************/
/*
 * try to determine where a shared mapping can be made
 * - we require that:
 *   - the pages to be mapped must exist
 *   - the pages must be physically contiguous in sequence
 */
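/* With no MMU there are no page tables to fault things through: a shared
 * mapping simply hands userspace the kernel address of the backing pages,
 * which is why those pages must already exist and form a single physically
 * contiguous run.
 */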
unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
					    unsigned long addr, unsigned long len,
					    unsigned long pgoff, unsigned long flags)
{
	unsigned long maxpages, lpages, nr, loop, ret;
	struct inode *inode = file->f_dentry->d_inode;
	struct page **pages = NULL, **ptr, *page;
	loff_t isize;

	if (!(flags & MAP_SHARED))
		return addr;

	/* the mapping mustn't extend beyond the EOF */
	lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	isize = i_size_read(inode);

	ret = -EINVAL;
	maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= maxpages)
		goto out;

	if (maxpages - pgoff < lpages)
		goto out;

	/* gang-find the pages */
	ret = -ENOMEM;
	pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out;

	nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
	if (nr != lpages)
		goto out; /* leave if some pages were missing */

	/* check the pages for physical adjacency */
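	/* (comparing struct page pointers works because consecutive page
	 *  frames have consecutive struct pages in the flat mem_map layout
	 *  normally used on nommu) */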
	ptr = pages;
	page = *ptr++;
	page++;
	for (loop = lpages; loop > 1; loop--)
		if (*ptr++ != page++)
			goto out;

	/* okay - all conditions fulfilled */
	ret = (unsigned long) page_address(pages[0]);

 out:
	if (pages) {
		ptr = pages;
		/* only release the pages find_get_pages() actually returned;
		 * the rest of the array is still NULL */
		for (loop = nr; loop > 0; loop--)
			put_page(*ptr++);
		kfree(pages);
	}

	return ret;
}

/*****************************************************************************/
/*
 * set up a mapping
 */
int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
{
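	/* nothing needs doing here: the backing pages already exist in the
	 * page cache and ramfs_nommu_get_unmapped_area() has already chosen
	 * where the mapping will sit, so just indicate success */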
	return 0;
}