5 * File handling routines for the OSTA-UDF(tm) filesystem.
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1998-1999 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
19 * 10/02/98 dgb Attempt to integrate into udf.o
20 * 10/07/98 Switched to using generic_readpage, etc., like isofs
22 * 12/06/98 blf Added udf_file_read. uses generic_file_read for all cases but
23 * ICBTAG_FLAG_AD_IN_ICB.
24 * 04/06/99 64 bit file handling on 32 bit systems taken from ext2 file.c
25 * 05/12/99 Preliminary file write support
30 #include <linux/uaccess.h>
31 #include <linux/kernel.h>
32 #include <linux/string.h> /* memset */
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/pagemap.h>
36 #include <linux/uio.h>
/*
 * udf_page_mkwrite - ->page_mkwrite handler for UDF shared mappings
 *
 * Called when a read-only page of a shared mapping is about to become
 * writable.  Takes pagefault protection on the superblock, updates the
 * file times, and — unless the file's data lives inside the ICB, where
 * space is already allocated — allocates blocks for the faulting page
 * via __block_write_begin()/block_commit_write() using udf_get_block.
 *
 * Returns VM_FAULT_LOCKED with the page locked on success,
 * VM_FAULT_NOPAGE if the page was truncated/invalidated in the
 * meantime, or block_page_mkwrite_return(err) on allocation failure.
 *
 * NOTE(review): several lines (braces, declarations of size/end/err,
 * page lock/unlock, gotos and the final return) appear elided from
 * this view of the file — confirm against the full source.
 */
41 static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
43 struct vm_area_struct *vma = vmf->vma;
44 struct inode *inode = file_inode(vma->vm_file);
45 struct address_space *mapping = inode->i_mapping;
46 struct page *page = vmf->page;
49 vm_fault_t ret = VM_FAULT_LOCKED;
/* Block superblock freezing and update mtime/ctime before dirtying */
52 sb_start_pagefault(inode->i_sb);
53 file_update_time(vma->vm_file);
/* Shared invalidate lock keeps truncate/hole-punch out while we work */
54 filemap_invalidate_lock_shared(mapping);
56 size = i_size_read(inode);
/* Page may have been truncated or invalidated while we slept */
57 if (page->mapping != inode->i_mapping || page_offset(page) >= size) {
59 ret = VM_FAULT_NOPAGE;
62 /* Space is already allocated for in-ICB file */
63 if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
/* On the last partial page, only allocate up to i_size within it */
65 if (page->index == size >> PAGE_SHIFT)
66 end = size & ~PAGE_MASK;
69 err = __block_write_begin(page, 0, end, udf_get_block);
71 err = block_commit_write(page, 0, end);
74 ret = block_page_mkwrite_return(err);
/* Let writeback of the previous page contents finish before redirty */
79 wait_for_stable_page(page);
81 filemap_invalidate_unlock_shared(mapping);
82 sb_end_pagefault(inode->i_sb);
/*
 * VM operations for UDF file mappings: generic fault handling from the
 * page cache, plus a custom ->page_mkwrite that allocates blocks when a
 * shared-mapping page is first written.
 */
86 static const struct vm_operations_struct udf_file_vm_ops = {
87 .fault = filemap_fault,
88 .map_pages = filemap_map_pages,
89 .page_mkwrite = udf_page_mkwrite,
/*
 * udf_file_write_iter - ->write_iter handler for UDF files
 *
 * Performs standard write checks, then — if the file's data is stored
 * inside the ICB and the write would overflow the space available there
 * (block size minus the allocation-descriptor offset) — expands the
 * file out of the ICB under the invalidate lock before falling through
 * to the generic buffered write path.  For files still in-ICB after a
 * successful write, i_lenAlloc is kept in sync with i_size under
 * i_data_sem.  Ends with generic_write_sync() for O_(D)SYNC handling.
 *
 * NOTE(review): the declaration of 'retval', error-return branches and
 * the final return appear elided from this view — confirm against the
 * full source.
 */
92 static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
95 struct file *file = iocb->ki_filp;
96 struct inode *inode = file_inode(file);
97 struct udf_inode_info *iinfo = UDF_I(inode);
101 retval = generic_write_checks(iocb, from);
/* Write won't fit in the ICB: convert to normal block allocation */
105 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
106 inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
107 iocb->ki_pos + iov_iter_count(from))) {
/* Exclusive invalidate lock: expansion rewrites the page cache */
108 filemap_invalidate_lock(inode->i_mapping);
109 retval = udf_expand_file_adinicb(inode);
110 filemap_invalidate_unlock(inode->i_mapping);
115 retval = __generic_file_write_iter(iocb, from);
/* In-ICB files: allocated length always tracks i_size */
117 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && retval > 0) {
118 down_write(&iinfo->i_data_sem);
119 iinfo->i_lenAlloc = inode->i_size;
120 up_write(&iinfo->i_data_sem);
125 mark_inode_dirty(inode);
126 retval = generic_write_sync(iocb, retval);
/*
 * udf_ioctl - UDF-specific ioctls
 *
 * Supported commands (all but UDF_RELOCATE_BLOCKS read-only):
 *   UDF_GETVOLIDENT     - copy the 32-byte volume identifier to user space
 *   UDF_RELOCATE_BLOCKS - relocate a block (CAP_SYS_ADMIN only); reads the
 *                         old block number from *arg, writes the new one back
 *   UDF_GETEASIZE       - return the length of the inode's extended attrs
 *   UDF_GETEABLOCK      - copy the extended-attribute data to user space
 *
 * A NULL arg is rejected for all four commands, and read permission on
 * the file is required up front.
 *
 * NOTE(review): the 'result' declaration, switch header, break/return
 * statements and error codes (-EFAULT/-EPERM/-EINVAL/-ENOIOCTLCMD)
 * appear elided from this view — confirm against the full source.
 */
132 long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
134 struct inode *inode = file_inode(filp);
135 long old_block, new_block;
/* Require read access before servicing any command */
138 if (file_permission(filp, MAY_READ) != 0) {
139 udf_debug("no permission to access inode %lu\n", inode->i_ino);
/* All supported commands need a non-NULL user buffer */
143 if (!arg && ((cmd == UDF_GETVOLIDENT) || (cmd == UDF_GETEASIZE) ||
144 (cmd == UDF_RELOCATE_BLOCKS) || (cmd == UDF_GETEABLOCK))) {
145 udf_debug("invalid argument to udf_ioctl\n");
150 case UDF_GETVOLIDENT:
151 if (copy_to_user((char __user *)arg,
152 UDF_SB(inode->i_sb)->s_volume_ident, 32))
/* Block relocation modifies on-disk structures: admin only */
155 case UDF_RELOCATE_BLOCKS:
156 if (!capable(CAP_SYS_ADMIN))
158 if (get_user(old_block, (long __user *)arg))
160 result = udf_relocate_blocks(inode->i_sb,
161 old_block, &new_block);
/* On success, hand the relocated block number back to the caller */
163 result = put_user(new_block, (long __user *)arg);
166 return put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg);
168 return copy_to_user((char __user *)arg,
169 UDF_I(inode)->i_data,
170 UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0;
/*
 * udf_release_file - ->release handler for UDF files
 *
 * When the last writer closes the file, drop any preallocated extents
 * and trim the tail extent so on-disk allocation matches i_size.
 *
 * NOTE(review): braces and the final return appear elided from this
 * view — confirm against the full source.
 */
178 static int udf_release_file(struct inode *inode, struct file *filp)
180 if (filp->f_mode & FMODE_WRITE &&
181 atomic_read(&inode->i_writecount) == 1) {
183 * Grab i_data_sem to avoid races with writes changing i_size
184 * while we are running.
187 down_write(&UDF_I(inode)->i_data_sem);
188 udf_discard_prealloc(inode);
189 udf_truncate_tail_extent(inode);
190 up_write(&UDF_I(inode)->i_data_sem);
/*
 * udf_file_mmap - ->mmap handler: install UDF's vm_ops so writable
 * shared mappings go through udf_page_mkwrite for block allocation.
 *
 * NOTE(review): the return statement appears elided from this view.
 */
196 static int udf_file_mmap(struct file *file, struct vm_area_struct *vma)
199 vma->vm_ops = &udf_file_vm_ops;
/*
 * File operations for regular UDF files: generic page-cache based
 * read/llseek/fsync/splice paths, with UDF-specific write, ioctl,
 * mmap and release handlers defined above.
 */
204 const struct file_operations udf_file_operations = {
205 .read_iter = generic_file_read_iter,
206 .unlocked_ioctl = udf_ioctl,
207 .open = generic_file_open,
208 .mmap = udf_file_mmap,
209 .write_iter = udf_file_write_iter,
210 .release = udf_release_file,
211 .fsync = generic_file_fsync,
212 .splice_read = generic_file_splice_read,
213 .splice_write = iter_file_splice_write,
214 .llseek = generic_file_llseek,
/*
 * udf_setattr - ->setattr handler for UDF inodes
 *
 * Validates the attribute change, rejects uid/gid changes that would
 * diverge from a mount-enforced uid/gid (UDF_FLAG_UID_SET /
 * UDF_FLAG_GID_SET), performs truncation via udf_setsize() when the
 * size changes, updates UDF extended permissions on mode changes, and
 * finally copies the attributes into the inode and marks it dirty.
 *
 * NOTE(review): the second signature line (struct iattr *attr), the
 * 'error' declaration and the error/success returns appear elided
 * from this view — confirm against the full source.
 */
217 static int udf_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
220 struct inode *inode = d_inode(dentry);
221 struct super_block *sb = inode->i_sb;
224 error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
/* With a forced mount uid, only a no-op chown to that uid is allowed */
228 if ((attr->ia_valid & ATTR_UID) &&
229 UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET) &&
230 !uid_eq(attr->ia_uid, UDF_SB(sb)->s_uid))
/* Same rule for a forced mount gid */
232 if ((attr->ia_valid & ATTR_GID) &&
233 UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET) &&
234 !gid_eq(attr->ia_gid, UDF_SB(sb)->s_gid))
/* Size change: truncate/extend on-disk allocation to the new size */
237 if ((attr->ia_valid & ATTR_SIZE) &&
238 attr->ia_size != i_size_read(inode)) {
239 error = udf_setsize(inode, attr->ia_size);
/* Keep UDF's on-disk extended permission bits in sync with the mode */
244 if (attr->ia_valid & ATTR_MODE)
245 udf_update_extra_perms(inode, attr->ia_mode);
247 setattr_copy(&nop_mnt_idmap, inode, attr);
248 mark_inode_dirty(inode);
252 const struct inode_operations udf_file_inode_operations = {
253 .setattr = udf_setattr,