// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 */
11 static int scif_fdopen(struct inode *inode, struct file *f)
13 struct scif_endpt *priv = scif_open();
17 f->private_data = priv;
21 static int scif_fdclose(struct inode *inode, struct file *f)
23 struct scif_endpt *priv = f->private_data;
25 return scif_close(priv);
28 static int scif_fdmmap(struct file *f, struct vm_area_struct *vma)
30 struct scif_endpt *priv = f->private_data;
32 return scif_mmap(vma, priv);
35 static __poll_t scif_fdpoll(struct file *f, poll_table *wait)
37 struct scif_endpt *priv = f->private_data;
39 return __scif_pollfd(f, wait, priv);
42 static int scif_fdflush(struct file *f, fl_owner_t id)
44 struct scif_endpt *ep = f->private_data;
48 * The listening endpoint stashes the open file information before
49 * waiting for incoming connections. The release callback would never be
50 * called if the application closed the endpoint, while waiting for
51 * incoming connections from a separate thread since the file descriptor
52 * reference count is bumped up in the accept IOCTL. Call the flush
53 * routine if the id matches the endpoint open file information so that
54 * the listening endpoint can be woken up and the fd released.
58 spin_unlock(&ep->lock);
62 static __always_inline void scif_err_debug(int err, const char *str)
65 * ENOTCONN is a common uninteresting error which is
66 * flooding debug messages to the console unnecessarily.
68 if (err < 0 && err != -ENOTCONN)
69 dev_dbg(scif_info.mdev.this_device, "%s err %d\n", str, err);
72 static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
74 struct scif_endpt *priv = f->private_data;
75 void __user *argp = (void __user *)arg;
77 struct scifioctl_msg request;
78 bool non_block = false;
80 non_block = !!(f->f_flags & O_NONBLOCK);
87 if (copy_from_user(&pn, argp, sizeof(pn)))
90 pn = scif_bind(priv, pn);
94 if (copy_to_user(argp, &pn, sizeof(pn)))
100 return scif_listen(priv, arg);
103 struct scifioctl_connect req;
104 struct scif_endpt *ep = (struct scif_endpt *)priv;
106 if (copy_from_user(&req, argp, sizeof(req)))
109 err = __scif_connect(priv, &req.peer, non_block);
113 req.self.node = ep->port.node;
114 req.self.port = ep->port.port;
116 if (copy_to_user(argp, &req, sizeof(req)))
122 * Accept is done in two halves. The request ioctl does the basic
123 * functionality of accepting the request and returning the information
124 * about it including the internal ID of the end point. The register
125 * is done with the internal ID on a new file descriptor opened by the
126 * requesting process.
130 struct scifioctl_accept request;
131 scif_epd_t *ep = (scif_epd_t *)&request.endpt;
133 if (copy_from_user(&request, argp, sizeof(request)))
136 err = scif_accept(priv, &request.peer, ep, request.flags);
140 if (copy_to_user(argp, &request, sizeof(request))) {
145 * Add to the list of user mode eps where the second half
146 * of the accept is not yet completed.
148 mutex_lock(&scif_info.eplock);
149 list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept);
150 list_add_tail(&((*ep)->liacceptlist), &priv->li_accept);
151 (*ep)->listenep = priv;
153 mutex_unlock(&scif_info.eplock);
159 struct scif_endpt *priv = f->private_data;
160 struct scif_endpt *newep;
161 struct scif_endpt *lisep;
162 struct scif_endpt *fep = NULL;
163 struct scif_endpt *tmpep;
164 struct list_head *pos, *tmpq;
166 /* Finally replace the pointer to the accepted endpoint */
167 if (copy_from_user(&newep, argp, sizeof(void *)))
170 /* Remove form the user accept queue */
171 mutex_lock(&scif_info.eplock);
172 list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
173 tmpep = list_entry(pos,
174 struct scif_endpt, miacceptlist);
175 if (tmpep == newep) {
183 mutex_unlock(&scif_info.eplock);
187 lisep = newep->listenep;
188 list_for_each_safe(pos, tmpq, &lisep->li_accept) {
189 tmpep = list_entry(pos,
190 struct scif_endpt, liacceptlist);
191 if (tmpep == newep) {
198 mutex_unlock(&scif_info.eplock);
200 /* Free the resources automatically created from the open. */
201 scif_anon_inode_fput(priv);
202 scif_teardown_ep(priv);
203 scif_add_epd_to_zombie_list(priv, !SCIF_EPLOCK_HELD);
204 f->private_data = newep;
209 struct scif_endpt *priv = f->private_data;
211 if (copy_from_user(&request, argp,
212 sizeof(struct scifioctl_msg))) {
216 err = scif_user_send(priv, (void __user *)request.msg,
217 request.len, request.flags);
221 ((struct scifioctl_msg __user *)argp)->out_len,
222 &err, sizeof(err))) {
228 scif_err_debug(err, "scif_send");
233 struct scif_endpt *priv = f->private_data;
235 if (copy_from_user(&request, argp,
236 sizeof(struct scifioctl_msg))) {
241 err = scif_user_recv(priv, (void __user *)request.msg,
242 request.len, request.flags);
247 ((struct scifioctl_msg __user *)argp)->out_len,
248 &err, sizeof(err))) {
254 scif_err_debug(err, "scif_recv");
257 case SCIF_GET_NODEIDS:
259 struct scifioctl_node_ids node_ids;
262 void __user *unodes, *uself;
265 if (copy_from_user(&node_ids, argp, sizeof(node_ids))) {
270 entries = min_t(int, scif_info.maxid, node_ids.len);
271 nodes = kmalloc_array(entries, sizeof(u16), GFP_KERNEL);
272 if (entries && !nodes) {
276 node_ids.len = scif_get_node_ids(nodes, entries, &self);
278 unodes = (void __user *)node_ids.nodes;
279 if (copy_to_user(unodes, nodes, sizeof(u16) * entries)) {
284 uself = (void __user *)node_ids.self;
285 if (copy_to_user(uself, &self, sizeof(u16))) {
290 if (copy_to_user(argp, &node_ids, sizeof(node_ids))) {
301 struct scif_endpt *priv = f->private_data;
302 struct scifioctl_reg reg;
305 if (copy_from_user(®, argp, sizeof(reg))) {
309 if (reg.flags & SCIF_MAP_KERNEL) {
313 ret = scif_register(priv, (void *)reg.addr, reg.len,
314 reg.offset, reg.prot, reg.flags);
320 if (copy_to_user(&((struct scifioctl_reg __user *)argp)
321 ->out_offset, &ret, sizeof(reg.out_offset))) {
327 scif_err_debug(err, "scif_register");
332 struct scif_endpt *priv = f->private_data;
333 struct scifioctl_unreg unreg;
335 if (copy_from_user(&unreg, argp, sizeof(unreg))) {
339 err = scif_unregister(priv, unreg.offset, unreg.len);
341 scif_err_debug(err, "scif_unregister");
346 struct scif_endpt *priv = f->private_data;
347 struct scifioctl_copy copy;
349 if (copy_from_user(©, argp, sizeof(copy))) {
353 err = scif_readfrom(priv, copy.loffset, copy.len, copy.roffset,
356 scif_err_debug(err, "scif_readfrom");
361 struct scif_endpt *priv = f->private_data;
362 struct scifioctl_copy copy;
364 if (copy_from_user(©, argp, sizeof(copy))) {
368 err = scif_writeto(priv, copy.loffset, copy.len, copy.roffset,
371 scif_err_debug(err, "scif_writeto");
376 struct scif_endpt *priv = f->private_data;
377 struct scifioctl_copy copy;
379 if (copy_from_user(©, argp, sizeof(copy))) {
383 err = scif_vreadfrom(priv, (void __force *)copy.addr, copy.len,
384 copy.roffset, copy.flags);
386 scif_err_debug(err, "scif_vreadfrom");
391 struct scif_endpt *priv = f->private_data;
392 struct scifioctl_copy copy;
394 if (copy_from_user(©, argp, sizeof(copy))) {
398 err = scif_vwriteto(priv, (void __force *)copy.addr, copy.len,
399 copy.roffset, copy.flags);
401 scif_err_debug(err, "scif_vwriteto");
404 case SCIF_FENCE_MARK:
406 struct scif_endpt *priv = f->private_data;
407 struct scifioctl_fence_mark mark;
410 if (copy_from_user(&mark, argp, sizeof(mark))) {
414 err = scif_fence_mark(priv, mark.flags, &tmp_mark);
417 if (copy_to_user((void __user *)mark.mark, &tmp_mark,
423 scif_err_debug(err, "scif_fence_mark");
426 case SCIF_FENCE_WAIT:
428 struct scif_endpt *priv = f->private_data;
430 err = scif_fence_wait(priv, arg);
431 scif_err_debug(err, "scif_fence_wait");
434 case SCIF_FENCE_SIGNAL:
436 struct scif_endpt *priv = f->private_data;
437 struct scifioctl_fence_signal signal;
439 if (copy_from_user(&signal, argp, sizeof(signal))) {
441 goto fence_signal_err;
444 err = scif_fence_signal(priv, signal.loff, signal.lval,
445 signal.roff, signal.rval, signal.flags);
447 scif_err_debug(err, "scif_fence_signal");
454 const struct file_operations scif_fops = {
456 .release = scif_fdclose,
457 .unlocked_ioctl = scif_fdioctl,
460 .flush = scif_fdflush,
461 .owner = THIS_MODULE,