// SPDX-License-Identifier: GPL-2.0-or-later
/* scm.c - Socket level control messages processing.
 *
 * Alignment and value checking mods by Craig Metz
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/io_uring.h>

#include <linux/uaccess.h>

#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/compat.h>
#include <net/scm.h>
#include <net/cls_cgroup.h>
#include <net/af_unix.h>

/*
 *	Only allow a user to send credentials that they could set with
 *	setu(g)id.
 */

static __inline__ int scm_check_creds(struct ucred *creds)
{
	const struct cred *cred = current_cred();
	kuid_t uid = make_kuid(cred->user_ns, creds->uid);
	kgid_t gid = make_kgid(cred->user_ns, creds->gid);

	if (!uid_valid(uid) || !gid_valid(gid))
		return -EINVAL;

	if ((creds->pid == task_tgid_vnr(current) ||
	     ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
	    ((uid_eq(uid, cred->uid)  || uid_eq(uid, cred->euid) ||
	      uid_eq(uid, cred->suid)) || ns_capable(cred->user_ns, CAP_SETUID)) &&
	    ((gid_eq(gid, cred->gid)  || gid_eq(gid, cred->egid) ||
	      gid_eq(gid, cred->sgid)) || ns_capable(cred->user_ns, CAP_SETGID))) {
		return 0;
	}
	return -EPERM;
}

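/*
 * Userspace counterpart (illustrative sketch, not part of this file): the
 * sendmsg() layout whose SCM_CREDENTIALS payload scm_check_creds() vets.
 * Assumes an already-connected AF_UNIX socket in "sock"; the helper name
 * send_own_creds() is hypothetical.
 *
 *	#define _GNU_SOURCE		// struct ucred, SCM_CREDENTIALS
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	static ssize_t send_own_creds(int sock, const char *buf, size_t len)
 *	{
 *		struct ucred creds = {
 *			.pid = getpid(),	// another pid needs CAP_SYS_ADMIN
 *			.uid = getuid(),	// real/effective/saved uid, or CAP_SETUID
 *			.gid = getgid(),	// real/effective/saved gid, or CAP_SETGID
 *		};
 *		union {				// aligned, CMSG_SPACE()-sized buffer
 *			char buf[CMSG_SPACE(sizeof(creds))];
 *			struct cmsghdr align;
 *		} u;
 *		struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
 *		};
 *		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *		cmsg->cmsg_level = SOL_SOCKET;
 *		cmsg->cmsg_type = SCM_CREDENTIALS;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(creds));
 *		memcpy(CMSG_DATA(cmsg), &creds, sizeof(creds));
 *		return sendmsg(sock, &msg, 0);
 *	}
 */
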
static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
{
	int *fdp = (int *)CMSG_DATA(cmsg);
	struct scm_fp_list *fpl = *fplp;
	struct file **fpp;
	int i, num;

	num = (cmsg->cmsg_len - sizeof(struct cmsghdr)) / sizeof(int);
	if (num <= 0)
		return 0;
	if (num > SCM_MAX_FD)
		return -EINVAL;

	if (!fpl) {
		fpl = kmalloc(sizeof(struct scm_fp_list), GFP_KERNEL_ACCOUNT);
		if (!fpl)
			return -ENOMEM;
		*fplp = fpl;
		fpl->count = 0;
		fpl->count_unix = 0;
		fpl->max = SCM_MAX_FD;
		fpl->user = NULL;
#if IS_ENABLED(CONFIG_UNIX)
		fpl->inflight = false;
		fpl->edges = NULL;
		INIT_LIST_HEAD(&fpl->vertices);
#endif
	}
	fpp = &fpl->fp[fpl->count];

	if (fpl->count + num > fpl->max)
		return -EINVAL;

	/* Verify the descriptors and increment the usage count. */
	for (i = 0; i < num; i++) {
		int fd = fdp[i];
		struct file *file;

		if (fd < 0 || !(file = fget_raw(fd)))
			return -EBADF;
		/* don't allow io_uring files */
		if (io_is_uring_fops(file)) {
			fput(file);
			return -EINVAL;
		}
		if (unix_get_socket(file))
			fpl->count_unix++;

		*fpp++ = file;
		fpl->count++;
	}

	if (!fpl->user)
		fpl->user = get_uid(current_user());

	return num;
}

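/*
 * Userspace counterpart (illustrative sketch, not part of this file): the
 * SCM_RIGHTS cmsg layout that scm_fp_copy() parses, passing one descriptor
 * over a connected AF_UNIX socket on a single byte of real payload. The
 * helper name send_fd() is hypothetical.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static ssize_t send_fd(int sock, int fd_to_pass)
 *	{
 *		char dummy = 0;
 *		struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
 *		union {
 *			char buf[CMSG_SPACE(sizeof(int))];
 *			struct cmsghdr align;
 *		} u;
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
 *		};
 *		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *		cmsg->cmsg_level = SOL_SOCKET;
 *		cmsg->cmsg_type = SCM_RIGHTS;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *		memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *		return sendmsg(sock, &msg, 0);
 *	}
 */
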
void __scm_destroy(struct scm_cookie *scm)
{
	struct scm_fp_list *fpl = scm->fp;
	int i;

	if (fpl) {
		scm->fp = NULL;
		for (i = fpl->count - 1; i >= 0; i--)
			fput(fpl->fp[i]);
		free_uid(fpl->user);
		kfree(fpl);
	}
}
EXPORT_SYMBOL(__scm_destroy);

int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
{
	const struct proto_ops *ops = READ_ONCE(sock->ops);
	struct cmsghdr *cmsg;
	int err;

	for_each_cmsghdr(cmsg, msg) {
		err = -EINVAL;

		/* Verify that cmsg_len is at least sizeof(struct cmsghdr) */
		/* The first check was omitted in <= 2.2.5. The reasoning was
		   that the parser checks cmsg_len in any case, so an
		   additional check would duplicate work.
		   But if cmsg_level is not SOL_SOCKET, we do not check
		   for a too-short ancillary data object at all! Oops.
		   OK, let's add it...
		 */
		if (!CMSG_OK(msg, cmsg))
			goto error;

		if (cmsg->cmsg_level != SOL_SOCKET)
			continue;

		switch (cmsg->cmsg_type) {
		case SCM_RIGHTS:
			if (!ops || ops->family != PF_UNIX)
				goto error;
			err = scm_fp_copy(cmsg, &p->fp);
			if (err < 0)
				goto error;
			break;
		case SCM_CREDENTIALS:
		{
			struct ucred creds;
			kuid_t uid;
			kgid_t gid;

			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred)))
				goto error;
			memcpy(&creds, CMSG_DATA(cmsg), sizeof(struct ucred));
			err = scm_check_creds(&creds);
			if (err)
				goto error;

			p->creds.pid = creds.pid;
			if (!p->pid || pid_vnr(p->pid) != creds.pid) {
				struct pid *pid;

				err = -ESRCH;
				pid = find_get_pid(creds.pid);
				if (!pid)
					goto error;
				put_pid(p->pid);
				p->pid = pid;
			}

			err = -EINVAL;
			uid = make_kuid(current_user_ns(), creds.uid);
			gid = make_kgid(current_user_ns(), creds.gid);
			if (!uid_valid(uid) || !gid_valid(gid))
				goto error;

			p->creds.uid = uid;
			p->creds.gid = gid;
			break;
		}
		default:
			goto error;
		}
	}

	if (p->fp && !p->fp->count) {
		kfree(p->fp);
		p->fp = NULL;
	}
	return 0;

error:
	scm_destroy(p);
	return err;
}
EXPORT_SYMBOL(__scm_send);

int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
{
	int cmlen = CMSG_LEN(len);

	if (msg->msg_flags & MSG_CMSG_COMPAT)
		return put_cmsg_compat(msg, level, type, len, data);

	if (!msg->msg_control || msg->msg_controllen < sizeof(struct cmsghdr)) {
		msg->msg_flags |= MSG_CTRUNC;
		return 0; /* XXX: return error? check spec. */
	}
	if (msg->msg_controllen < cmlen) {
		msg->msg_flags |= MSG_CTRUNC;
		cmlen = msg->msg_controllen;
	}

	if (msg->msg_control_is_user) {
		struct cmsghdr __user *cm = msg->msg_control_user;

		check_object_size(data, cmlen - sizeof(*cm), true);

		if (!user_write_access_begin(cm, cmlen))
			goto efault;

		unsafe_put_user(cmlen, &cm->cmsg_len, efault_end);
		unsafe_put_user(level, &cm->cmsg_level, efault_end);
		unsafe_put_user(type, &cm->cmsg_type, efault_end);
		unsafe_copy_to_user(CMSG_USER_DATA(cm), data,
				    cmlen - sizeof(*cm), efault_end);
		user_write_access_end();
	} else {
		struct cmsghdr *cm = msg->msg_control;

		cm->cmsg_level = level;
		cm->cmsg_type = type;
		cm->cmsg_len = cmlen;
		memcpy(CMSG_DATA(cm), data, cmlen - sizeof(*cm));
	}

	cmlen = min(CMSG_SPACE(len), msg->msg_controllen);
	if (msg->msg_control_is_user)
		msg->msg_control_user += cmlen;
	else
		msg->msg_control += cmlen;
	msg->msg_controllen -= cmlen;
	return 0;

efault_end:
	user_write_access_end();
efault:
	return -EFAULT;
}
EXPORT_SYMBOL(put_cmsg);

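/*
 * On the other side of the copy, userspace walks what put_cmsg() emitted
 * with the standard CMSG_FIRSTHDR()/CMSG_NXTHDR() iteration. Illustrative
 * sketch (not part of this file); handle_cmsg() is a hypothetical callback.
 *
 *	#include <sys/socket.h>
 *
 *	static void walk_cmsgs(struct msghdr *msg,
 *			       void (*handle_cmsg)(struct cmsghdr *))
 *	{
 *		struct cmsghdr *cmsg;
 *
 *		for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
 *		     cmsg = CMSG_NXTHDR(msg, cmsg))
 *			handle_cmsg(cmsg);	// inspect cmsg_level/cmsg_type
 *	}
 */
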
void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss_internal)
{
	struct scm_timestamping64 tss;
	int i;

	for (i = 0; i < ARRAY_SIZE(tss.ts); i++) {
		tss.ts[i].tv_sec = tss_internal->ts[i].tv_sec;
		tss.ts[i].tv_nsec = tss_internal->ts[i].tv_nsec;
	}

	put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPING_NEW, sizeof(tss), &tss);
}
EXPORT_SYMBOL(put_cmsg_scm_timestamping64);

void put_cmsg_scm_timestamping(struct msghdr *msg, struct scm_timestamping_internal *tss_internal)
{
	struct scm_timestamping tss;
	int i;

	for (i = 0; i < ARRAY_SIZE(tss.ts); i++) {
		tss.ts[i].tv_sec = tss_internal->ts[i].tv_sec;
		tss.ts[i].tv_nsec = tss_internal->ts[i].tv_nsec;
	}

	put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPING_OLD, sizeof(tss), &tss);
}
EXPORT_SYMBOL(put_cmsg_scm_timestamping);

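/*
 * Illustrative userspace consumer (not part of this file) for the "old"
 * layout above: with SO_TIMESTAMPING enabled, timestamps arrive as a
 * SOL_SOCKET/SCM_TIMESTAMPING cmsg carrying struct scm_timestamping
 * (ts[0] software, ts[2] hardware). Sketch only; extract_tstamps() is a
 * hypothetical helper, and header availability may vary by libc.
 *
 *	#include <time.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/errqueue.h>	// struct scm_timestamping
 *	#include <linux/net_tstamp.h>
 *
 *	static int extract_tstamps(struct msghdr *msg, struct scm_timestamping *out)
 *	{
 *		struct cmsghdr *cmsg;
 *
 *		for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
 *		     cmsg = CMSG_NXTHDR(msg, cmsg)) {
 *			if (cmsg->cmsg_level == SOL_SOCKET &&
 *			    cmsg->cmsg_type == SCM_TIMESTAMPING) {
 *				memcpy(out, CMSG_DATA(cmsg), sizeof(*out));
 *				return 0;
 *			}
 *		}
 *		return -1;	// no timestamping cmsg present
 *	}
 */
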
static int scm_max_fds(struct msghdr *msg)
{
	if (msg->msg_controllen <= sizeof(struct cmsghdr))
		return 0;
	return (msg->msg_controllen - sizeof(struct cmsghdr)) / sizeof(int);
}

void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
{
	struct cmsghdr __user *cm =
		(__force struct cmsghdr __user *)msg->msg_control_user;
	unsigned int o_flags = (msg->msg_flags & MSG_CMSG_CLOEXEC) ? O_CLOEXEC : 0;
	int fdmax = min_t(int, scm_max_fds(msg), scm->fp->count);
	int __user *cmsg_data = CMSG_USER_DATA(cm);
	int err = 0, i;

	/* no use for FD passing from kernel space callers */
	if (WARN_ON_ONCE(!msg->msg_control_is_user))
		return;

	if (msg->msg_flags & MSG_CMSG_COMPAT) {
		scm_detach_fds_compat(msg, scm);
		return;
	}

	for (i = 0; i < fdmax; i++) {
		err = scm_recv_one_fd(scm->fp->fp[i], cmsg_data + i, o_flags);
		if (err < 0)
			break;
	}

	if (i > 0) {
		int cmlen = CMSG_LEN(i * sizeof(int));

		err = put_user(SOL_SOCKET, &cm->cmsg_level);
		if (!err)
			err = put_user(SCM_RIGHTS, &cm->cmsg_type);
		if (!err)
			err = put_user(cmlen, &cm->cmsg_len);
		if (!err) {
			cmlen = CMSG_SPACE(i * sizeof(int));
			if (msg->msg_controllen < cmlen)
				cmlen = msg->msg_controllen;
			msg->msg_control_user += cmlen;
			msg->msg_controllen -= cmlen;
		}
	}

	if (i < scm->fp->count || (scm->fp->count && fdmax <= 0))
		msg->msg_flags |= MSG_CTRUNC;

	/*
	 * All of the files that fit in the message have had their usage counts
	 * incremented, so we just free the list.
	 */
	__scm_destroy(scm);
}
EXPORT_SYMBOL(scm_detach_fds);

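/*
 * Illustrative receive-side sketch (not part of this file): how userspace
 * collects the descriptors that scm_detach_fds() installed. Passing
 * MSG_CMSG_CLOEXEC to recvmsg() is what turns o_flags above into O_CLOEXEC.
 * recv_fds() and MAX_FDS are hypothetical names.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	#define MAX_FDS 16
 *
 *	static int recv_fds(int sock, int *fds)
 *	{
 *		char data;
 *		struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *		union {
 *			char buf[CMSG_SPACE(MAX_FDS * sizeof(int))];
 *			struct cmsghdr align;
 *		} u;
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
 *		};
 *		struct cmsghdr *cmsg;
 *		int nfds = 0;
 *
 *		if (recvmsg(sock, &msg, MSG_CMSG_CLOEXEC) < 0)
 *			return -1;
 *		if (msg.msg_flags & MSG_CTRUNC)	// some fds did not fit
 *			return -1;
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *			if (cmsg->cmsg_level != SOL_SOCKET ||
 *			    cmsg->cmsg_type != SCM_RIGHTS)
 *				continue;
 *			nfds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
 *			memcpy(fds, CMSG_DATA(cmsg), nfds * sizeof(int));
 *		}
 *		return nfds;
 *	}
 */
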
struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
{
	struct scm_fp_list *new_fpl;
	int i;

	if (!fpl)
		return NULL;

	new_fpl = kmemdup(fpl, offsetof(struct scm_fp_list, fp[fpl->count]),
			  GFP_KERNEL_ACCOUNT);
	if (new_fpl) {
		for (i = 0; i < fpl->count; i++)
			get_file(fpl->fp[i]);
		new_fpl->max = new_fpl->count;
		new_fpl->user = get_uid(fpl->user);
#if IS_ENABLED(CONFIG_UNIX)
		new_fpl->inflight = false;
		new_fpl->edges = NULL;
		INIT_LIST_HEAD(&new_fpl->vertices);
#endif
	}
	return new_fpl;
}
EXPORT_SYMBOL(scm_fp_dup);