1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Finite state machine for vfio-ccw device handling
4  *
5  * Copyright IBM Corp. 2017
6  * Copyright Red Hat, Inc. 2019
7  *
8  * Author(s): Dong Jia Shi <[email protected]>
9  *            Cornelia Huck <[email protected]>
10  */
11
12 #include <linux/vfio.h>
13 #include <linux/mdev.h>
14
15 #include "ioasm.h"
16 #include "vfio_ccw_private.h"
17
18 #define CREATE_TRACE_POINTS
19 #include "vfio_ccw_trace.h"
20
/*
 * fsm_io_helper - submit the translated channel program to the device.
 *
 * Fetches the ORB built for the channel program via cp_get_orb(), issues
 * it with ssch() ("Start Subchannel") and converts the resulting
 * condition code into an errno.
 *
 * Returns 0 if the start function is now pending (the FSM state is
 * advanced to VFIO_CCW_STATE_CP_PENDING), a negative errno on failure,
 * or the raw condition code for any unexpected value.
 *
 * Runs with sch->lock held (taken here) and interrupts disabled.
 */
static int fsm_io_helper(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
	union orb *orb;
	int ccode;
	__u8 lpm;
	unsigned long flags;
	int ret;

	sch = private->sch;

	spin_lock_irqsave(sch->lock, flags);

	/*
	 * NOTE(review): the subchannel address truncated to u32 is passed
	 * to cp_get_orb() - presumably as the interruption parameter;
	 * confirm against cp_get_orb().
	 */
	orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
	if (!orb) {
		ret = -EIO;
		goto out;
	}

	VFIO_CCW_TRACE_EVENT(5, "stIO");
	VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev));

	/* Issue "Start Subchannel" */
	ccode = ssch(sch->schid, orb);

	VFIO_CCW_HEX_EVENT(5, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * Initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		ret = 0;
		private->state = VFIO_CCW_STATE_CP_PENDING;
		break;
	case 1:		/* Status pending */
	case 2:		/* Busy */
		ret = -EBUSY;
		break;
	case 3:		/* Device/path not operational */
	{
		lpm = orb->cmd.lpm;
		if (lpm != 0)
			sch->lpm &= ~lpm;	/* drop only the failing path(s) */
		else
			sch->lpm = 0;		/* no path mask: all paths gone */

		if (cio_update_schib(sch))
			ret = -ENODEV;
		else
			/* Paths left: access problem; none left: device gone. */
			ret = sch->lpm ? -EACCES : -ENODEV;
		break;
	}
	default:
		ret = ccode;
	}
out:
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}
82
83 static int fsm_do_halt(struct vfio_ccw_private *private)
84 {
85         struct subchannel *sch;
86         unsigned long flags;
87         int ccode;
88         int ret;
89
90         sch = private->sch;
91
92         spin_lock_irqsave(sch->lock, flags);
93
94         VFIO_CCW_TRACE_EVENT(2, "haltIO");
95         VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
96
97         /* Issue "Halt Subchannel" */
98         ccode = hsch(sch->schid);
99
100         VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
101
102         switch (ccode) {
103         case 0:
104                 /*
105                  * Initialize device status information
106                  */
107                 sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
108                 ret = 0;
109                 break;
110         case 1:         /* Status pending */
111         case 2:         /* Busy */
112                 ret = -EBUSY;
113                 break;
114         case 3:         /* Device not operational */
115                 ret = -ENODEV;
116                 break;
117         default:
118                 ret = ccode;
119         }
120         spin_unlock_irqrestore(sch->lock, flags);
121         return ret;
122 }
123
124 static int fsm_do_clear(struct vfio_ccw_private *private)
125 {
126         struct subchannel *sch;
127         unsigned long flags;
128         int ccode;
129         int ret;
130
131         sch = private->sch;
132
133         spin_lock_irqsave(sch->lock, flags);
134
135         VFIO_CCW_TRACE_EVENT(2, "clearIO");
136         VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
137
138         /* Issue "Clear Subchannel" */
139         ccode = csch(sch->schid);
140
141         VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
142
143         switch (ccode) {
144         case 0:
145                 /*
146                  * Initialize device status information
147                  */
148                 sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
149                 /* TODO: check what else we might need to clear */
150                 ret = 0;
151                 break;
152         case 3:         /* Device not operational */
153                 ret = -ENODEV;
154                 break;
155         default:
156                 ret = ccode;
157         }
158         spin_unlock_irqrestore(sch->lock, flags);
159         return ret;
160 }
161
/*
 * fsm_notoper - the device became not operational.
 *
 * Schedules deferred unregistration of the subchannel and parks the
 * FSM in VFIO_CCW_STATE_NOT_OPER, from which only error actions are
 * reachable (see vfio_ccw_jumptable).
 */
static void fsm_notoper(struct vfio_ccw_private *private,
			enum vfio_ccw_event event)
{
	struct subchannel *sch = private->sch;

	VFIO_CCW_TRACE_EVENT(2, "notoper");
	VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));

	/*
	 * TODO:
	 * Probably we should send the machine check to the guest.
	 */
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	private->state = VFIO_CCW_STATE_NOT_OPER;
}
177
/*
 * No operation action.  Used for events that are legal in the current
 * state but require no work (e.g. NOT_OPER while already not-oper).
 */
static void fsm_nop(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
}
185
186 static void fsm_io_error(struct vfio_ccw_private *private,
187                          enum vfio_ccw_event event)
188 {
189         pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
190         private->io_region->ret_code = -EIO;
191 }
192
/*
 * fsm_io_busy - an I/O request arrived while one is already pending
 * (CP_PENDING); tell userspace to back off with -EBUSY.
 */
static void fsm_io_busy(struct vfio_ccw_private *private,
			enum vfio_ccw_event event)
{
	private->io_region->ret_code = -EBUSY;
}
198
/*
 * fsm_io_retry - an I/O request arrived while a channel program is
 * still being translated (CP_PROCESSING); ask userspace to retry.
 */
static void fsm_io_retry(struct vfio_ccw_private *private,
			 enum vfio_ccw_event event)
{
	private->io_region->ret_code = -EAGAIN;
}
204
205 static void fsm_async_error(struct vfio_ccw_private *private,
206                             enum vfio_ccw_event event)
207 {
208         struct ccw_cmd_region *cmd_region = private->cmd_region;
209
210         pr_err("vfio-ccw: FSM: %s request from state:%d\n",
211                cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
212                cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
213                "<unknown>", private->state);
214         cmd_region->ret_code = -EIO;
215 }
216
/*
 * fsm_async_retry - an async (halt/clear) request arrived while a
 * channel program is still being translated (CP_PROCESSING); ask
 * userspace to retry.
 */
static void fsm_async_retry(struct vfio_ccw_private *private,
			    enum vfio_ccw_event event)
{
	private->cmd_region->ret_code = -EAGAIN;
}
222
223 static void fsm_disabled_irq(struct vfio_ccw_private *private,
224                              enum vfio_ccw_event event)
225 {
226         struct subchannel *sch = private->sch;
227
228         /*
229          * An interrupt in a disabled state means a previous disable was not
230          * successful - should not happen, but we try to disable again.
231          */
232         cio_disable_subchannel(sch);
233 }
234 inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
235 {
236         return p->sch->schid;
237 }
238
/*
 * Deal with the ccw command request from the userspace.
 *
 * Copies the SCSW out of the I/O region to see which function the
 * guest asked for.  Only the start function is served here: the
 * channel program is translated (cp_init + cp_prefetch) and submitted
 * via fsm_io_helper().  Halt and clear are rejected with -EOPNOTSUPP
 * since they are served through the async command region instead.
 * The outcome is reported to userspace in io_region->ret_code.
 */
static void fsm_io_request(struct vfio_ccw_private *private,
			   enum vfio_ccw_event event)
{
	union orb *orb;
	union scsw *scsw = &private->scsw;
	struct ccw_io_region *io_region = private->io_region;
	struct mdev_device *mdev = private->mdev;
	char *errstr = "request";
	struct subchannel_id schid = get_schid(private);

	/*
	 * NOTE(review): state is set to CP_PROCESSING unconditionally and
	 * not reset on the error paths below - verify the FSM recovers
	 * (e.g. via a subsequent interrupt/retry) before relying on it.
	 */
	private->state = VFIO_CCW_STATE_CP_PROCESSING;
	memcpy(scsw, io_region->scsw_area, sizeof(*scsw));

	if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
		orb = (union orb *)io_region->orb_area;

		/* Don't try to build a cp if transport mode is specified. */
		if (orb->tm.b) {
			io_region->ret_code = -EOPNOTSUPP;
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): transport mode\n",
					   mdev_uuid(mdev), schid.cssid,
					   schid.ssid, schid.sch_no);
			errstr = "transport mode";
			goto err_out;
		}
		/* Translate the guest channel program into host terms. */
		io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
					      orb);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): cp_init=%d\n",
					   mdev_uuid(mdev), schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp init";
			goto err_out;
		}

		io_region->ret_code = cp_prefetch(&private->cp);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): cp_prefetch=%d\n",
					   mdev_uuid(mdev), schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp prefetch";
			cp_free(&private->cp);
			goto err_out;
		}

		/* Start channel program and wait for I/O interrupt. */
		io_region->ret_code = fsm_io_helper(private);
		if (io_region->ret_code) {
			VFIO_CCW_MSG_EVENT(2,
					   "%pUl (%x.%x.%04x): fsm_io_helper=%d\n",
					   mdev_uuid(mdev), schid.cssid,
					   schid.ssid, schid.sch_no,
					   io_region->ret_code);
			errstr = "cp fsm_io_helper";
			cp_free(&private->cp);
			goto err_out;
		}
		/* Success: the I/O interrupt will complete the request. */
		return;
	} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		VFIO_CCW_MSG_EVENT(2,
				   "%pUl (%x.%x.%04x): halt on io_region\n",
				   mdev_uuid(mdev), schid.cssid,
				   schid.ssid, schid.sch_no);
		/* halt is handled via the async cmd region */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		VFIO_CCW_MSG_EVENT(2,
				   "%pUl (%x.%x.%04x): clear on io_region\n",
				   mdev_uuid(mdev), schid.cssid,
				   schid.ssid, schid.sch_no);
		/* clear is handled via the async cmd region */
		io_region->ret_code = -EOPNOTSUPP;
		goto err_out;
	}

	/*
	 * NOTE(review): if no fctl bit is set we fall through to err_out
	 * without assigning ret_code - confirm this is intended.
	 */
err_out:
	trace_vfio_ccw_io_fctl(scsw->cmd.fctl, schid,
			       io_region->ret_code, errstr);
}
327
328 /*
329  * Deal with an async request from userspace.
330  */
331 static void fsm_async_request(struct vfio_ccw_private *private,
332                               enum vfio_ccw_event event)
333 {
334         struct ccw_cmd_region *cmd_region = private->cmd_region;
335
336         switch (cmd_region->command) {
337         case VFIO_CCW_ASYNC_CMD_HSCH:
338                 cmd_region->ret_code = fsm_do_halt(private);
339                 break;
340         case VFIO_CCW_ASYNC_CMD_CSCH:
341                 cmd_region->ret_code = fsm_do_clear(private);
342                 break;
343         default:
344                 /* should not happen? */
345                 cmd_region->ret_code = -EINVAL;
346         }
347 }
348
/*
 * Got an interrupt for a normal io (state busy).
 *
 * Snapshots the per-cpu IRB into the private structure, then defers
 * the actual completion processing to the vfio_ccw workqueue.  If a
 * waiter registered a completion, signal it as well.
 */
static void fsm_irq(struct vfio_ccw_private *private,
		    enum vfio_ccw_event event)
{
	struct irb *irb = this_cpu_ptr(&cio_irb);

	VFIO_CCW_TRACE_EVENT(6, "IRQ");
	VFIO_CCW_TRACE_EVENT(6, dev_name(&private->sch->dev));

	/* Copy before leaving interrupt context; the per-cpu IRB is reused. */
	memcpy(&private->irb, irb, sizeof(*irb));

	queue_work(vfio_ccw_work_q, &private->io_work);

	if (private->completion)
		complete(private->completion);
}
367
/*
 * Device statemachine
 *
 * Indexed by [state][event]; each entry is the action run when that
 * event is delivered in that state.
 */
fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
	/* Device gone: reject requests, re-disable on stray interrupts. */
	[VFIO_CCW_STATE_NOT_OPER] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_nop,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
	},
	/* Present but not yet usable: requests are errors. */
	[VFIO_CCW_STATE_STANDBY] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_error,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	/* Ready: accept both I/O and async requests. */
	[VFIO_CCW_STATE_IDLE] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_request,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	/* Translating a channel program: ask callers to retry (-EAGAIN). */
	[VFIO_CCW_STATE_CP_PROCESSING] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_retry,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_retry,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
	/* I/O started: new I/O is busy, but halt/clear are allowed. */
	[VFIO_CCW_STATE_CP_PENDING] = {
		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
		[VFIO_CCW_EVENT_ASYNC_REQ]	= fsm_async_request,
		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
	},
};