// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
 */

#include <linux/sched/signal.h>
#include "autofs_i.h"

/* We make this a static variable rather than a part of the superblock; it
 * is better if we don't reassign numbers easily even across filesystems
 */
static autofs_wqt_t autofs_next_wait_queue = 1;
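
/*
 * Note: the token allocated from this counter is carried in the packet
 * written to the daemon's pipe and echoed back by the daemon's ready/fail
 * ioctls (AUTOFS_IOC_READY/AUTOFS_IOC_FAIL), which is how
 * autofs_wait_release() finds the wait queue entry to complete.
 */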

void autofs_catatonic_mode(struct autofs_sb_info *sbi)
{
	struct autofs_wait_queue *wq, *nwq;

	mutex_lock(&sbi->wq_mutex);
	if (sbi->flags & AUTOFS_SBI_CATATONIC) {
		mutex_unlock(&sbi->wq_mutex);
		return;
	}

	pr_debug("entering catatonic mode\n");

	sbi->flags |= AUTOFS_SBI_CATATONIC;
	wq = sbi->queues;
	sbi->queues = NULL;	/* Erase all wait queues */
	while (wq) {
		nwq = wq->next;
		wq->status = -ENOENT; /* Magic is gone - report failure */
		kfree(wq->name.name - wq->offset);
		wq->name.name = NULL;
		wq->wait_ctr--;
		wake_up_interruptible(&wq->queue);
		wq = nwq;
	}
	fput(sbi->pipe);	/* Close the pipe */
	sbi->pipe = NULL;
	mutex_unlock(&sbi->wq_mutex);
}
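
/*
 * Write a complete packet to the daemon's pipe under ->pipe_mutex.
 * A short or failed write is reported as an error, and a SIGPIPE raised
 * by the write is cleared unless the caller already had one pending.
 */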
static int autofs_write(struct autofs_sb_info *sbi,
			struct file *file, const void *addr, int bytes)
{
	unsigned long sigpipe, flags;
	const char *data = (const char *)addr;
	ssize_t wr = 0;

	sigpipe = sigismember(&current->pending.signal, SIGPIPE);

	mutex_lock(&sbi->pipe_mutex);
	while (bytes) {
		wr = __kernel_write(file, data, bytes, NULL);
		if (wr <= 0)
			break;
		data += wr;
		bytes -= wr;
	}
	mutex_unlock(&sbi->pipe_mutex);

	/* Keep the currently executing process from receiving a
	 * SIGPIPE unless it was already supposed to get one
	 */
	if (wr == -EPIPE && !sigpipe) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		sigdelset(&current->pending.signal, SIGPIPE);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	/* if 'wr' returned 0 (impossible) we assume -EIO (safe) */
	return bytes == 0 ? 0 : wr < 0 ? wr : -EIO;
}
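
/*
 * Build a missing/expire packet matching the daemon's protocol version
 * and send it down the pipe. Called with ->wq_mutex held; the mutex is
 * released here before the pipe is written (or on the bad-type error path).
 */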
static void autofs_notify_daemon(struct autofs_sb_info *sbi,
				 struct autofs_wait_queue *wq,
				 int type)
{
	union {
		struct autofs_packet_hdr hdr;
		union autofs_packet_union v4_pkt;
		union autofs_v5_packet_union v5_pkt;
	} pkt;
	struct file *pipe = NULL;
	size_t pktsz;
	int ret;

	pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
		 (unsigned long) wq->wait_queue_token,
		 wq->name.len, wq->name.name, type);

	memset(&pkt, 0, sizeof(pkt));	/* For security reasons */

	pkt.hdr.proto_version = sbi->version;
	pkt.hdr.type = type;

	switch (type) {
	/* Kernel protocol v4 missing and expire packets */
	case autofs_ptype_missing:
	{
		struct autofs_packet_missing *mp = &pkt.v4_pkt.missing;

		pktsz = sizeof(*mp);

		mp->wait_queue_token = wq->wait_queue_token;
		mp->len = wq->name.len;
		memcpy(mp->name, wq->name.name, wq->name.len);
		mp->name[wq->name.len] = '\0';
		break;
	}
	case autofs_ptype_expire_multi:
	{
		struct autofs_packet_expire_multi *ep =
					&pkt.v4_pkt.expire_multi;

		pktsz = sizeof(*ep);

		ep->wait_queue_token = wq->wait_queue_token;
		ep->len = wq->name.len;
		memcpy(ep->name, wq->name.name, wq->name.len);
		ep->name[wq->name.len] = '\0';
		break;
	}
	/*
	 * Kernel protocol v5 packet for handling indirect and direct
	 * mount missing and expire requests
	 */
	case autofs_ptype_missing_indirect:
	case autofs_ptype_expire_indirect:
	case autofs_ptype_missing_direct:
	case autofs_ptype_expire_direct:
	{
		struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet;
		struct user_namespace *user_ns = sbi->pipe->f_cred->user_ns;

		pktsz = sizeof(*packet);

		packet->wait_queue_token = wq->wait_queue_token;
		packet->len = wq->name.len;
		memcpy(packet->name, wq->name.name, wq->name.len);
		packet->name[wq->name.len] = '\0';
		packet->dev = wq->dev;
		packet->ino = wq->ino;
		packet->uid = from_kuid_munged(user_ns, wq->uid);
		packet->gid = from_kgid_munged(user_ns, wq->gid);
		packet->pid = wq->pid;
		packet->tgid = wq->tgid;
		break;
	}
	default:
		pr_warn("bad type %d!\n", type);
		mutex_unlock(&sbi->wq_mutex);
		return;
	}

	pipe = get_file(sbi->pipe);

	mutex_unlock(&sbi->wq_mutex);

	switch (ret = autofs_write(sbi, pipe, &pkt, pktsz)) {
	case 0:
		break;
	case -ENOMEM:
	case -ERESTARTSYS:
		/* Just fail this one */
		autofs_wait_release(sbi, wq->wait_queue_token, ret);
		break;
	default:
		autofs_catatonic_mode(sbi);
		break;
	}
	fput(pipe);
}
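
/*
 * Look up an in-flight wait by name. Caller must hold ->wq_mutex.
 */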
static struct autofs_wait_queue *
autofs_find_wait(struct autofs_sb_info *sbi, const struct qstr *qstr)
{
	struct autofs_wait_queue *wq;

	for (wq = sbi->queues; wq; wq = wq->next) {
		if (wq->name.hash == qstr->hash &&
		    wq->name.len == qstr->len &&
		    wq->name.name &&
		    !memcmp(wq->name.name, qstr->name, qstr->len))
			break;
	}
	return wq;
}

/*
 * Check if we have a valid request.
 * Returns
 * 1 if the request should continue.
 *   In this case we can return an autofs_wait_queue entry if one is
 *   found or NULL to indicate a new wait needs to be created.
 * 0 or a negative errno if the request shouldn't continue.
 */
static int validate_request(struct autofs_wait_queue **wait,
			    struct autofs_sb_info *sbi,
			    const struct qstr *qstr,
			    const struct path *path, enum autofs_notify notify)
{
	struct dentry *dentry = path->dentry;
	struct autofs_wait_queue *wq;
	struct autofs_info *ino;

	if (sbi->flags & AUTOFS_SBI_CATATONIC)
		return -ENOENT;

	/* Wait in progress, continue; */
	wq = autofs_find_wait(sbi, qstr);
	if (wq) {
		*wait = wq;
		return 1;
	}

	*wait = NULL;

	/* If we don't yet have any info this is a new request */
	ino = autofs_dentry_ino(dentry);
	if (!ino)
		return 1;

	/*
	 * If we've been asked to wait on an existing expire (NFY_NONE)
	 * but there is no wait in the queue ...
	 */
	if (notify == NFY_NONE) {
		/*
		 * Either we've beaten the pending expire to post its
		 * wait or it finished while we waited on the mutex.
		 * So we need to wait till either the wait appears
		 * or the expire finishes.
		 */
		while (ino->flags & AUTOFS_INF_EXPIRING) {
			mutex_unlock(&sbi->wq_mutex);
			schedule_timeout_interruptible(HZ/10);
			if (mutex_lock_interruptible(&sbi->wq_mutex))
				return -EINTR;

			if (sbi->flags & AUTOFS_SBI_CATATONIC)
				return -ENOENT;

			wq = autofs_find_wait(sbi, qstr);
			if (wq) {
				*wait = wq;
				return 1;
			}
		}

		/*
		 * Not ideal but the status has already gone. Of the two
		 * cases where we wait on NFY_NONE neither depends on the
		 * return status of the wait.
		 */
		return 0;
	}

	/*
	 * If we've been asked to trigger a mount and the request
	 * completed while we waited on the mutex ...
	 */
	if (notify == NFY_MOUNT) {
		struct dentry *new = NULL;
		struct path this;
		int valid = 1;

		/*
		 * If the dentry was successfully mounted while we slept
		 * on the wait queue mutex we can return success. If it
		 * isn't mounted (doesn't have submounts for the case of
		 * a multi-mount with no mount at its base) we can
		 * continue on and create a new request.
		 */
		if (!IS_ROOT(dentry)) {
			if (d_unhashed(dentry) &&
			    d_really_is_positive(dentry)) {
				struct dentry *parent = dentry->d_parent;

				new = d_lookup(parent, &dentry->d_name);
				if (new)
					dentry = new;
			}
		}
		this.mnt = path->mnt;
		this.dentry = dentry;
		if (path_has_submounts(&this))
			valid = 0;

		if (new)
			dput(new);
		return valid;
	}

	return 1;
}
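
/*
 * Notify the daemon that a mount is needed (or is expiring) and block
 * until it responds, or until the mount is made catatonic. Concurrent
 * requests for the same name share one wait queue entry; wq->wait_ctr
 * tracks the remaining users of that entry.
 */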
int autofs_wait(struct autofs_sb_info *sbi,
		const struct path *path, enum autofs_notify notify)
{
	struct dentry *dentry = path->dentry;
	struct autofs_wait_queue *wq;
	struct qstr qstr;
	char *name;
	int status, ret, type;
	unsigned int offset = 0;
	pid_t pid;
	pid_t tgid;

	/* In catatonic mode, we don't wait for anybody */
	if (sbi->flags & AUTOFS_SBI_CATATONIC)
		return -ENOENT;

	/*
	 * Try translating pids to the namespace of the daemon.
	 *
	 * Zero means failure: we are in an unrelated pid namespace.
	 */
	pid = task_pid_nr_ns(current, ns_of_pid(sbi->oz_pgrp));
	tgid = task_tgid_nr_ns(current, ns_of_pid(sbi->oz_pgrp));
	if (pid == 0 || tgid == 0)
		return -ENOENT;

	if (d_really_is_negative(dentry)) {
		/*
		 * A wait for a negative dentry is invalid for certain
		 * cases. A direct or offset mount "always" has its mount
		 * point directory created and so the request dentry must
		 * be positive or the map key doesn't exist. The situation
		 * is very similar for indirect mounts except only dentries
		 * in the root of the autofs file system may be negative.
		 */
		if (autofs_type_trigger(sbi->type))
			return -ENOENT;
		else if (!IS_ROOT(dentry->d_parent))
			return -ENOENT;
	}

	name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	/* If this is a direct mount request create a dummy name */
	if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type)) {
		qstr.name = name;
		qstr.len = sprintf(name, "%p", dentry);
	} else {
		char *p = dentry_path_raw(dentry, name, NAME_MAX);

		if (IS_ERR(p)) {
			kfree(name);
			return -ENOENT;
		}
		qstr.name = ++p; /* skip the leading slash */
		qstr.len = strlen(p);
		offset = p - name;
	}
	qstr.hash = full_name_hash(dentry, qstr.name, qstr.len);

	if (mutex_lock_interruptible(&sbi->wq_mutex)) {
		kfree(name);
		return -EINTR;
	}

	ret = validate_request(&wq, sbi, &qstr, path, notify);
	if (ret <= 0) {
		if (ret != -EINTR)
			mutex_unlock(&sbi->wq_mutex);
		kfree(name);
		return ret;
	}

	if (!wq) {
		/* Create a new wait queue */
		wq = kmalloc(sizeof(struct autofs_wait_queue), GFP_KERNEL);
		if (!wq) {
			kfree(name);
			mutex_unlock(&sbi->wq_mutex);
			return -ENOMEM;
		}

		wq->wait_queue_token = autofs_next_wait_queue;
		if (++autofs_next_wait_queue == 0)
			autofs_next_wait_queue = 1;
		wq->next = sbi->queues;
		sbi->queues = wq;
		init_waitqueue_head(&wq->queue);
		memcpy(&wq->name, &qstr, sizeof(struct qstr));
		wq->offset = offset;
		wq->dev = autofs_get_dev(sbi);
		wq->ino = autofs_get_ino(sbi);
		wq->uid = current_uid();
		wq->gid = current_gid();
		wq->pid = pid;
		wq->tgid = tgid;
		wq->status = -EINTR; /* Status return if interrupted */
		wq->wait_ctr = 2;

		if (sbi->version < 5) {
			if (notify == NFY_MOUNT)
				type = autofs_ptype_missing;
			else
				type = autofs_ptype_expire_multi;
		} else {
			if (notify == NFY_MOUNT)
				type = autofs_type_trigger(sbi->type) ?
					autofs_ptype_missing_direct :
					autofs_ptype_missing_indirect;
			else
				type = autofs_type_trigger(sbi->type) ?
					autofs_ptype_expire_direct :
					autofs_ptype_expire_indirect;
		}

		pr_debug("new wait id = 0x%08lx, name = %.*s, nfy=%d\n",
			 (unsigned long) wq->wait_queue_token, wq->name.len,
			 wq->name.name, notify);

		/*
		 * autofs_notify_daemon() may block; it will unlock ->wq_mutex
		 */
		autofs_notify_daemon(sbi, wq, type);
	} else {
		wq->wait_ctr++;
		pr_debug("existing wait id = 0x%08lx, name = %.*s, nfy=%d\n",
			 (unsigned long) wq->wait_queue_token, wq->name.len,
			 wq->name.name, notify);
		mutex_unlock(&sbi->wq_mutex);
		kfree(name);
	}

	/*
	 * wq->name.name is NULL iff the lock is already released
	 * or the mount has been made catatonic.
	 */
	wait_event_killable(wq->queue, wq->name.name == NULL);
	status = wq->status;

	/*
	 * For direct and offset mounts we need to track the requester's
	 * uid and gid in the dentry info struct. This is so it can be
	 * supplied, on request, by the misc device ioctl interface.
	 * This is needed during daemon restart when reconnecting
	 * to existing, active, autofs mounts. The uid and gid (and
	 * related string values) may be used for macro substitution
	 * in autofs mount maps.
	 */
	if (!status) {
		struct autofs_info *ino;
		struct dentry *de = NULL;

		/* direct mount or browsable map */
		ino = autofs_dentry_ino(dentry);
		if (!ino) {
			/* If not lookup actual dentry used */
			de = d_lookup(dentry->d_parent, &dentry->d_name);
			if (de)
				ino = autofs_dentry_ino(de);
		}

		/* Set mount requester */
		if (ino) {
			spin_lock(&sbi->fs_lock);
			ino->uid = wq->uid;
			ino->gid = wq->gid;
			spin_unlock(&sbi->fs_lock);
		}

		if (de)
			dput(de);
	}

	/* Are we the last process to need status? */
	mutex_lock(&sbi->wq_mutex);
	if (!--wq->wait_ctr)
		kfree(wq);
	mutex_unlock(&sbi->wq_mutex);

	return status;
}
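
/*
 * Called when the daemon reports a request as ready or failed: unlink
 * the matching wait queue entry, record the status and wake every
 * process sleeping in autofs_wait().
 */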
int autofs_wait_release(struct autofs_sb_info *sbi,
			autofs_wqt_t wait_queue_token, int status)
{
	struct autofs_wait_queue *wq, **wql;

	mutex_lock(&sbi->wq_mutex);
	for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) {
		if (wq->wait_queue_token == wait_queue_token)
			break;
	}

	if (!wq) {
		mutex_unlock(&sbi->wq_mutex);
		return -EINVAL;
	}

	*wql = wq->next;	/* Unlink from chain */
	kfree(wq->name.name - wq->offset);
	wq->name.name = NULL;	/* Do not wait on this queue */
	wq->status = status;
	wake_up(&wq->queue);
	if (!--wq->wait_ctr)
		kfree(wq);
	mutex_unlock(&sbi->wq_mutex);

	return 0;
}