// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2004-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/compat.h>
#include <linux/ioctl.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fsmap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_fsops.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_attr.h"
#include "xfs_ioctl.h"
#include "xfs_ioctl32.h"
#include "xfs_trace.h"
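
/*
 * Rebuild a 32-bit ioctl command with the size of the native structure so
 * that it can be handed on to the regular handlers.
 */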
#define  _NATIVE_IOC(cmd, type) \
	  _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))

#ifdef BROKEN_X86_ALIGNMENT
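/*
 * On i386, 64-bit fields are only 32-bit aligned, so the 32-bit and native
 * layouts of these structures differ and must be copied field by field.
 */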
STATIC int
xfs_compat_flock64_copyin(
	xfs_flock64_t		*bf,
	compat_xfs_flock64_t	__user *arg32)
{
	if (get_user(bf->l_type,	&arg32->l_type) ||
	    get_user(bf->l_whence,	&arg32->l_whence) ||
	    get_user(bf->l_start,	&arg32->l_start) ||
	    get_user(bf->l_len,		&arg32->l_len) ||
	    get_user(bf->l_sysid,	&arg32->l_sysid) ||
	    get_user(bf->l_pid,		&arg32->l_pid) ||
	    copy_from_user(bf->l_pad,	&arg32->l_pad,	4*sizeof(u32)))
		return -EFAULT;
	return 0;
}
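
/* Return filesystem geometry in the 32-bit XFS_IOC_FSGEOMETRY_V1 layout. */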
STATIC int
xfs_compat_ioc_fsgeometry_v1(
	struct xfs_mount	  *mp,
	compat_xfs_fsop_geom_v1_t __user *arg32)
{
	xfs_fsop_geom_t		  fsgeo;
	int			  error;

	error = xfs_fs_geometry(&mp->m_sb, &fsgeo, 3);
	if (error)
		return error;
	/* The 32-bit variant simply has some padding at the end */
	if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1)))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_compat_growfs_data_copyin(
	struct xfs_growfs_data	 *in,
	compat_xfs_growfs_data_t __user *arg32)
{
	if (get_user(in->newblocks, &arg32->newblocks) ||
	    get_user(in->imaxpct,   &arg32->imaxpct))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_compat_growfs_rt_copyin(
	struct xfs_growfs_rt	 *in,
	compat_xfs_growfs_rt_t	__user *arg32)
{
	if (get_user(in->newblocks, &arg32->newblocks) ||
	    get_user(in->extsize,   &arg32->extsize))
		return -EFAULT;
	return 0;
}
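
/*
 * Format an inode-group record into the 32-bit layout used by
 * XFS_IOC_FSINUMBERS callers.
 */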
STATIC int
xfs_inumbers_fmt_compat(
	void			__user *ubuffer,
	const struct xfs_inogrp	*buffer,
	long			count,
	long			*written)
{
	compat_xfs_inogrp_t	__user *p32 = ubuffer;
	long			i;

	for (i = 0; i < count; i++) {
		if (put_user(buffer[i].xi_startino,   &p32[i].xi_startino) ||
		    put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) ||
		    put_user(buffer[i].xi_allocmask,  &p32[i].xi_allocmask))
			return -EFAULT;
	}
	*written = count * sizeof(*p32);
	return 0;
}

#else
#define xfs_inumbers_fmt_compat xfs_inumbers_fmt
#endif	/* BROKEN_X86_ALIGNMENT */

STATIC int
xfs_ioctl32_bstime_copyin(
	xfs_bstime_t		*bstime,
	compat_xfs_bstime_t	__user *bstime32)
{
	compat_time_t		sec32;	/* tv_sec differs on 64 vs. 32 */

	if (get_user(sec32,		&bstime32->tv_sec)	||
	    get_user(bstime->tv_nsec,	&bstime32->tv_nsec))
		return -EFAULT;
	bstime->tv_sec = sec32;
	return 0;
}

/* xfs_bstat_t has differing alignment on intel, & bstime_t sizes everywhere */
STATIC int
xfs_ioctl32_bstat_copyin(
	xfs_bstat_t		*bstat,
	compat_xfs_bstat_t	__user *bstat32)
{
	if (get_user(bstat->bs_ino,	&bstat32->bs_ino)	||
	    get_user(bstat->bs_mode,	&bstat32->bs_mode)	||
	    get_user(bstat->bs_nlink,	&bstat32->bs_nlink)	||
	    get_user(bstat->bs_uid,	&bstat32->bs_uid)	||
	    get_user(bstat->bs_gid,	&bstat32->bs_gid)	||
	    get_user(bstat->bs_rdev,	&bstat32->bs_rdev)	||
	    get_user(bstat->bs_blksize,	&bstat32->bs_blksize)	||
	    get_user(bstat->bs_size,	&bstat32->bs_size)	||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) ||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) ||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) ||
	    get_user(bstat->bs_blocks,	&bstat32->bs_blocks)	||
	    get_user(bstat->bs_xflags,	&bstat32->bs_xflags)	||
	    get_user(bstat->bs_extsize,	&bstat32->bs_extsize)	||
	    get_user(bstat->bs_extents,	&bstat32->bs_extents)	||
	    get_user(bstat->bs_gen,	&bstat32->bs_gen)	||
	    get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) ||
	    get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) ||
	    get_user(bstat->bs_forkoff,	&bstat32->bs_forkoff)	||
	    get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask)	||
	    get_user(bstat->bs_dmstate,	&bstat32->bs_dmstate)	||
	    get_user(bstat->bs_aextents, &bstat32->bs_aextents))
		return -EFAULT;
	return 0;
}

/* XFS_IOC_FSBULKSTAT and friends */
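
/*
 * Copy a timestamp out to the 32-bit layout; tv_sec is truncated to
 * 32 bits on the way out.
 */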
STATIC int
xfs_bstime_store_compat(
	compat_xfs_bstime_t	__user *p32,
	const xfs_bstime_t	*p)
{
	__s32			sec32;

	sec32 = p->tv_sec;
	if (put_user(sec32, &p32->tv_sec) ||
	    put_user(p->tv_nsec, &p32->tv_nsec))
		return -EFAULT;
	return 0;
}

/* Return 0 on success or negative error (to xfs_bulkstat()) */
STATIC int
xfs_bulkstat_one_fmt_compat(
	void			__user *ubuffer,
	int			ubsize,
	int			*ubused,
	const xfs_bstat_t	*buffer)
{
	compat_xfs_bstat_t	__user *p32 = ubuffer;

	if (ubsize < sizeof(*p32))
		return -ENOMEM;

	if (put_user(buffer->bs_ino,	  &p32->bs_ino)		||
	    put_user(buffer->bs_mode,	  &p32->bs_mode)	||
	    put_user(buffer->bs_nlink,	  &p32->bs_nlink)	||
	    put_user(buffer->bs_uid,	  &p32->bs_uid)		||
	    put_user(buffer->bs_gid,	  &p32->bs_gid)		||
	    put_user(buffer->bs_rdev,	  &p32->bs_rdev)	||
	    put_user(buffer->bs_blksize,  &p32->bs_blksize)	||
	    put_user(buffer->bs_size,	  &p32->bs_size)	||
	    xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
	    xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
	    xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
	    put_user(buffer->bs_blocks,	  &p32->bs_blocks)	||
	    put_user(buffer->bs_xflags,	  &p32->bs_xflags)	||
	    put_user(buffer->bs_extsize,  &p32->bs_extsize)	||
	    put_user(buffer->bs_extents,  &p32->bs_extents)	||
	    put_user(buffer->bs_gen,	  &p32->bs_gen)		||
	    put_user(buffer->bs_projid,	  &p32->bs_projid)	||
	    put_user(buffer->bs_projid_hi, &p32->bs_projid_hi)	||
	    put_user(buffer->bs_forkoff,  &p32->bs_forkoff)	||
	    put_user(buffer->bs_dmevmask, &p32->bs_dmevmask)	||
	    put_user(buffer->bs_dmstate,  &p32->bs_dmstate)	||
	    put_user(buffer->bs_aextents, &p32->bs_aextents))
		return -EFAULT;
	if (ubused)
		*ubused = sizeof(*p32);
	return 0;
}
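
/* Bulkstat a single inode using the compat formatter above. */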
STATIC int
xfs_bulkstat_one_compat(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	int		*ubused,	/* bytes used by me */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt_compat,
				    ubused, stat);
}

/* copied from xfs_ioctl.c */
STATIC int
xfs_compat_ioc_bulkstat(
	xfs_mount_t		  *mp,
	unsigned int		  cmd,
	compat_xfs_fsop_bulkreq_t __user *p32)
{
	u32			addr;
	xfs_fsop_bulkreq_t	bulkreq;
	int			count;	/* # of records returned */
	xfs_ino_t		inlast;	/* last inode number */
	int			done;
	int			error;

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (get_user(addr, &p32->lastip))
		return -EFAULT;
	bulkreq.lastip = compat_ptr(addr);
	if (get_user(bulkreq.icount, &p32->icount) ||
	    get_user(addr, &p32->ubuffer))
		return -EFAULT;
	bulkreq.ubuffer = compat_ptr(addr);
	if (get_user(addr, &p32->ocount))
		return -EFAULT;
	bulkreq.ocount = compat_ptr(addr);

	if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
		return -EFAULT;

	if ((count = bulkreq.icount) <= 0)
		return -EINVAL;

	if (bulkreq.ubuffer == NULL)
		return -EINVAL;

	if (cmd == XFS_IOC_FSINUMBERS_32) {
		error = xfs_inumbers(mp, &inlast, &count,
				bulkreq.ubuffer, xfs_inumbers_fmt_compat);
	} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
		int res;

		error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
				sizeof(compat_xfs_bstat_t), NULL, &res);
	} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
		error = xfs_bulkstat(mp, &inlast, &count,
			xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t),
			bulkreq.ubuffer, &done);
	} else
		error = -EINVAL;
	if (error)
		return error;

	if (bulkreq.ocount != NULL) {
		if (copy_to_user(bulkreq.lastip, &inlast,
				 sizeof(xfs_ino_t)))
			return -EFAULT;

		if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
			return -EFAULT;
	}

	return 0;
}
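
/*
 * Copy in a 32-bit handle request, converting the 32-bit user pointers
 * to native pointers with compat_ptr().
 */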
STATIC int
xfs_compat_handlereq_copyin(
	xfs_fsop_handlereq_t		*hreq,
	compat_xfs_fsop_handlereq_t	__user *arg32)
{
	compat_xfs_fsop_handlereq_t	hreq32;

	if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t)))
		return -EFAULT;

	hreq->fd = hreq32.fd;
	hreq->path = compat_ptr(hreq32.path);
	hreq->oflags = hreq32.oflags;
	hreq->ihandle = compat_ptr(hreq32.ihandle);
	hreq->ihandlen = hreq32.ihandlen;
	hreq->ohandle = compat_ptr(hreq32.ohandle);
	hreq->ohandlen = compat_ptr(hreq32.ohandlen);

	return 0;
}

STATIC struct dentry *
xfs_compat_handlereq_to_dentry(
	struct file		*parfilp,
	compat_xfs_fsop_handlereq_t *hreq)
{
	return xfs_handle_to_dentry(parfilp,
			compat_ptr(hreq->ihandle), hreq->ihandlen);
}
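
/*
 * List extended attributes of the inode referenced by the 32-bit handle
 * request and copy the listing back to userspace.
 */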
STATIC int
xfs_compat_attrlist_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	attrlist_cursor_kern_t	*cursor;
	compat_xfs_fsop_attrlist_handlereq_t al_hreq;
	struct dentry		*dentry;
	char			*kbuf;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&al_hreq, arg,
			   sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
		return -EFAULT;
	if (al_hreq.buflen < sizeof(struct attrlist) ||
	    al_hreq.buflen > XFS_XATTR_LIST_MAX)
		return -EINVAL;

	/*
	 * Reject flags, only allow namespaces.
	 */
	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
		return -EINVAL;

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -ENOMEM;
	kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
	if (!kbuf)
		goto out_dput;

	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
					al_hreq.flags, cursor);
	if (error)
		goto out_kfree;

	if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
		error = -EFAULT;

out_kfree:
	kmem_free(kbuf);
out_dput:
	dput(dentry);
	return error;
}
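
/*
 * Run a batch of attribute get/set/remove operations against the inode
 * referenced by the 32-bit handle request.
 */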
STATIC int
xfs_compat_attrmulti_by_handle(
	struct file				*parfilp,
	void					__user *arg)
{
	int					error;
	compat_xfs_attr_multiop_t		*ops;
	compat_xfs_fsop_attrmulti_handlereq_t	am_hreq;
	struct dentry				*dentry;
	unsigned int				i, size;
	unsigned char				*attr_name;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&am_hreq, arg,
			   sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
		return -EFAULT;

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -E2BIG;
	size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(compat_ptr(am_hreq.ops), size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	error = -ENOMEM;
	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
	if (!attr_name)
		goto out_kfree_ops;

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = strncpy_from_user((char *)attr_name,
				compat_ptr(ops[i].am_attrname),
				MAXNAMELEN);
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
			error = -ERANGE;
		if (ops[i].am_error < 0)
			break;

		switch (ops[i].am_opcode) {
		case ATTR_OP_GET:
			ops[i].am_error = xfs_attrmulti_attr_get(
					d_inode(dentry), attr_name,
					compat_ptr(ops[i].am_attrvalue),
					&ops[i].am_length, ops[i].am_flags);
			break;
		case ATTR_OP_SET:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_set(
					d_inode(dentry), attr_name,
					compat_ptr(ops[i].am_attrvalue),
					ops[i].am_length, ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		case ATTR_OP_REMOVE:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_remove(
					d_inode(dentry), attr_name,
					ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		default:
			ops[i].am_error = -EINVAL;
		}
	}

	if (copy_to_user(compat_ptr(am_hreq.ops), ops, size))
		error = -EFAULT;

	kfree(attr_name);
 out_kfree_ops:
	kfree(ops);
 out_dput:
	dput(dentry);
	return error;
}
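
/* Set DMAPI event mask and state on the inode referenced by the handle. */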
STATIC int
xfs_compat_fssetdm_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	struct fsdmidata	fsd;
	compat_xfs_fsop_setdm_handlereq_t dmhreq;
	struct dentry		*dentry;

	if (!capable(CAP_MKNOD))
		return -EPERM;
	if (copy_from_user(&dmhreq, arg,
			   sizeof(compat_xfs_fsop_setdm_handlereq_t)))
		return -EFAULT;

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
		error = -EPERM;
		goto out;
	}

	if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) {
		error = -EFAULT;
		goto out;
	}

	error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
				 fsd.fsd_dmstate);

out:
	dput(dentry);
	return error;
}
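
/*
 * 32-bit compat ioctl entry point.  Commands whose layout matches the
 * native one are passed straight to xfs_file_ioctl(); the rest are
 * translated here first.
 */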
long
xfs_file_compat_ioctl(
	struct file		*filp,
	unsigned		cmd,
	unsigned long		p)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = (void __user *)p;
	int			error;

	trace_xfs_file_compat_ioctl(ip);

	switch (cmd) {
	/* No size or alignment issues on any arch */
	case XFS_IOC_DIOINFO:
	case XFS_IOC_FSGEOMETRY:
	case XFS_IOC_FSGETXATTR:
	case XFS_IOC_FSSETXATTR:
	case XFS_IOC_FSGETXATTRA:
	case XFS_IOC_FSSETDM:
	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
	case XFS_IOC_GETBMAPX:
	case XFS_IOC_FSCOUNTS:
	case XFS_IOC_SET_RESBLKS:
	case XFS_IOC_GET_RESBLKS:
	case XFS_IOC_FSGROWFSLOG:
	case XFS_IOC_GOINGDOWN:
	case XFS_IOC_ERROR_INJECTION:
	case XFS_IOC_ERROR_CLEARALL:
	case FS_IOC_GETFSMAP:
	case XFS_IOC_SCRUB_METADATA:
		return xfs_file_ioctl(filp, cmd, p);
#ifndef BROKEN_X86_ALIGNMENT
	/* These are handled fine if no alignment issues */
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_FREESP:
	case XFS_IOC_RESVSP:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP64:
	case XFS_IOC_FSGEOMETRY_V1:
	case XFS_IOC_FSGROWFSDATA:
	case XFS_IOC_FSGROWFSRT:
	case XFS_IOC_ZERO_RANGE:
		return xfs_file_ioctl(filp, cmd, p);
#else
	case XFS_IOC_ALLOCSP_32:
	case XFS_IOC_FREESP_32:
	case XFS_IOC_ALLOCSP64_32:
	case XFS_IOC_FREESP64_32:
	case XFS_IOC_RESVSP_32:
	case XFS_IOC_UNRESVSP_32:
	case XFS_IOC_RESVSP64_32:
	case XFS_IOC_UNRESVSP64_32:
	case XFS_IOC_ZERO_RANGE_32: {
		struct xfs_flock64	bf;

		if (xfs_compat_flock64_copyin(&bf, arg))
			return -EFAULT;
		cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
		return xfs_ioc_space(filp, cmd, &bf);
	}
	case XFS_IOC_FSGEOMETRY_V1_32:
		return xfs_compat_ioc_fsgeometry_v1(mp, arg);
	case XFS_IOC_FSGROWFSDATA_32: {
		struct xfs_growfs_data	in;

		if (xfs_compat_growfs_data_copyin(&in, arg))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_data(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}
	case XFS_IOC_FSGROWFSRT_32: {
		struct xfs_growfs_rt	in;

		if (xfs_compat_growfs_rt_copyin(&in, arg))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_rt(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}
#endif
	/* long changes size, but xfs only copies out 32 bits */
	case XFS_IOC_GETXFLAGS_32:
	case XFS_IOC_SETXFLAGS_32:
	case XFS_IOC_GETVERSION_32:
		cmd = _NATIVE_IOC(cmd, long);
		return xfs_file_ioctl(filp, cmd, p);
	case XFS_IOC_SWAPEXT_32: {
		struct xfs_swapext	  sxp;
		struct compat_xfs_swapext __user *sxu = arg;

		/* Bulk copy in up to the sx_stat field, then copy bstat */
		if (copy_from_user(&sxp, sxu,
				   offsetof(struct xfs_swapext, sx_stat)) ||
		    xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_ioc_swapext(&sxp);
		mnt_drop_write_file(filp);
		return error;
	}
	case XFS_IOC_FSBULKSTAT_32:
	case XFS_IOC_FSBULKSTAT_SINGLE_32:
	case XFS_IOC_FSINUMBERS_32:
		return xfs_compat_ioc_bulkstat(mp, cmd, arg);
	case XFS_IOC_FD_TO_HANDLE_32:
	case XFS_IOC_PATH_TO_HANDLE_32:
	case XFS_IOC_PATH_TO_FSHANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -EFAULT;
		cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq);
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -EFAULT;
		return xfs_open_by_handle(filp, &hreq);
	}
	case XFS_IOC_READLINK_BY_HANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -EFAULT;
		return xfs_readlink_by_handle(filp, &hreq);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE_32:
		return xfs_compat_attrlist_by_handle(filp, arg);
	case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
		return xfs_compat_attrmulti_by_handle(filp, arg);
	case XFS_IOC_FSSETDM_BY_HANDLE_32:
		return xfs_compat_fssetdm_by_handle(filp, arg);
	default:
		return -ENOIOCTLCMD;
	}
}