/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>		/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
	struct hstate		*hstate;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize, Opt_min_size,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_min_size,	"min_size=%s"},
	{Opt_err,	NULL},
};

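/*
 * Example (illustrative, not from this file): the tokens above are
 * matched against a mount command line such as
 *
 *	mount -t hugetlbfs -o uid=1000,size=1G,min_size=512M,pagesize=2M \
 *		none /mnt/huge
 *
 * Sizes may be given in bytes (with an optional K/M/G suffix) or as a
 * percentage of the huge page pool, e.g. "size=50%".
 */
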
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))

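/*
 * For example, on a 64-bit arch with 4K pages (PAGE_SHIFT == 12),
 * PGOFF_LOFFT_MAX is the top 13 bits of an unsigned long,
 * 0xfff8000000000000: a vm_pgoff with any of those bits set would
 * overflow, or go negative, once shifted left by PAGE_SHIFT into a
 * byte offset.
 */
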
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

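	/*
	 * Illustrative numbers for the alignment check below: with 4K
	 * base pages and 2MB huge pages, an mmap() file offset of 3MB
	 * gives vm_pgoff == 768 (0x300), and 0x300 & 0x1ff is nonzero,
	 * so the request is rejected as not huge page aligned.
	 */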
	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

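/*
 * Note on align_mask above (illustrative, 2MB huge pages on a 4K base
 * page arch): PAGE_MASK & ~huge_page_mask(h) == 0x1ff000, so
 * vm_unmapped_area() only returns addresses with those bits clear,
 * i.e. 2MB-aligned addresses.
 */
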
static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), we can't use that
 * since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}

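/*
 * Worked example for hugetlb_vmdelete_list() above (illustrative, 4K
 * base pages, 2MB huge pages): to unmap huge page index 3 of a file,
 * callers pass start = 3 * 512 and end = 4 * 512 (base page units).
 * For a vma with vm_pgoff == 0 covering the whole file, v_offset
 * becomes 1536 << 12 = 6MB and v_end is capped 8MB past vm_start, so
 * exactly that one huge page is unmapped.
 */
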
/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults can not race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv map for ranges without associated
 *	pages is not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	vma_init(&pseudo_vma, current->mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(h, current->mm,
							&pseudo_vma,
							mapping, index, 0);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* root inode doesn't have the resv_map, so we should check it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_mutex */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}

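/*
 * Rounding example for the hole punch above (illustrative, 2MB huge
 * pages): punching offset = 1MB, len = 4MB rounds hole_start up to 2MB
 * and hole_end down to 4MB, so only the single fully covered huge page
 * in [2MB, 4MB) is removed; the partially covered pages at either end
 * are left intact.
 */
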
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
						index, addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * unlock_page because locked by add_to_page_cache()
		 * put_page due to reference from alloc_huge_page()
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

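/*
 * Example usage from userspace (illustrative, assuming 2MB huge
 * pages): preallocate the first four huge pages of a hugetlbfs file
 * with
 *
 *	fallocate(fd, 0, 0, 8 * 1024 * 1024);
 *
 * then punch the middle two back out (PUNCH_HOLE must be combined with
 * KEEP_SIZE) with
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  2 * 1024 * 1024, 4 * 1024 * 1024);
 */
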
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		error = hugetlb_vmtruncate(inode, newsize);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * page_private is subpool pointer in hugetlb pages.  Transfer to
	 * new page.  PagePrivate is not associated with page_private for
	 * hugetlb pages and can not be set here as only page_huge_active
	 * pages can be migrated.
	 */
	if (page_private(page)) {
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.set_page_dirty		= hugetlbfs_set_page_dirty,
	.migratepage		= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};

static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}

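/*
 * For example (illustrative): with 2MB huge pages (shift == 21) and a
 * pool of 1024 pages, "size=50%" arrives here as size_opt == 50 with
 * SIZE_PERCENT, and ((50 << 21) * 1024 / 100) >> 21 == 512 huge pages.
 */
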
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long max_size_opt = 0, min_size_opt = 0;
	enum hugetlbfs_size_type max_val_type = NO_SIZE, min_val_type = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			max_size_opt = memparse(args[0].from, &rest);
			max_val_type = SIZE_STD;
			if (*rest == '%')
				max_val_type = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		case Opt_min_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			min_size_opt = memparse(args[0].from, &rest);
			min_val_type = SIZE_STD;
			if (*rest == '%')
				min_val_type = SIZE_PERCENT;
			break;
		}

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
			break;
		}
	}

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						max_size_opt, max_val_type);
	pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						min_size_opt, min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (max_val_type > NO_SIZE &&
	    pconfig->min_hpages > pconfig->max_hpages) {
		pr_err("minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}

static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	config.max_hpages = -1; /* No limit on size by default */
	config.nr_inodes = -1; /* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	config.min_hpages = -1; /* No default minimum size */
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	sbinfo->uid = config.uid;
	sbinfo->gid = config.gid;
	sbinfo->mode = config.mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (config.max_hpages != -1 || config.min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(config.hstate,
							config.max_hpages,
							config.min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

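/*
 * Illustrative path (an assumption about the caller, not code in this
 * file): shmget(..., SHM_HUGETLB | SHM_HUGE_2MB) reaches
 * hugetlb_file_setup() below with page_size_log == 21, and
 * get_hstate_idx() then selects the 2MB hstate if one is configured
 * (page_size_log == 0 means the default huge page size).
 */
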
/*
 * Note that size should be aligned to the proper hugepage size on the
 * caller's side; otherwise hugetlb_reserve_pages reserves one fewer
 * huge page than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("Cannot mount internal hugetlbfs for page size %uK",
			       ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}
		i++;
	}
	/* Non-default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
out2:
	return error;
}
fs_initcall(init_hugetlbfs_fs)