1 // SPDX-License-Identifier: GPL-2.0
/*
 * Finite state machine for vfio-ccw device handling
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 */
12 #include <linux/vfio.h>
13 #include <linux/mdev.h>
16 #include "vfio_ccw_private.h"
18 #define CREATE_TRACE_POINTS
19 #include "vfio_ccw_trace.h"
21 static int fsm_io_helper(struct vfio_ccw_private *private)
23 struct subchannel *sch;
32 spin_lock_irqsave(sch->lock, flags);
34 orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
40 VFIO_CCW_TRACE_EVENT(5, "stIO");
41 VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev));
43 /* Issue "Start Subchannel" */
44 ccode = ssch(sch->schid, orb);
46 VFIO_CCW_HEX_EVENT(5, &ccode, sizeof(ccode));
51 * Initialize device status information
53 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
55 private->state = VFIO_CCW_STATE_CP_PENDING;
57 case 1: /* Status pending */
61 case 3: /* Device/path not operational */
69 if (cio_update_schib(sch))
72 ret = sch->lpm ? -EACCES : -ENODEV;
79 spin_unlock_irqrestore(sch->lock, flags);
83 static int fsm_do_halt(struct vfio_ccw_private *private)
85 struct subchannel *sch;
92 spin_lock_irqsave(sch->lock, flags);
94 VFIO_CCW_TRACE_EVENT(2, "haltIO");
95 VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
97 /* Issue "Halt Subchannel" */
98 ccode = hsch(sch->schid);
100 VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
105 * Initialize device status information
107 sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
110 case 1: /* Status pending */
114 case 3: /* Device not operational */
120 spin_unlock_irqrestore(sch->lock, flags);
124 static int fsm_do_clear(struct vfio_ccw_private *private)
126 struct subchannel *sch;
133 spin_lock_irqsave(sch->lock, flags);
135 VFIO_CCW_TRACE_EVENT(2, "clearIO");
136 VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
138 /* Issue "Clear Subchannel" */
139 ccode = csch(sch->schid);
141 VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
146 * Initialize device status information
148 sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
149 /* TODO: check what else we might need to clear */
152 case 3: /* Device not operational */
158 spin_unlock_irqrestore(sch->lock, flags);
162 static void fsm_notoper(struct vfio_ccw_private *private,
163 enum vfio_ccw_event event)
165 struct subchannel *sch = private->sch;
167 VFIO_CCW_TRACE_EVENT(2, "notoper");
168 VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
172 * Probably we should send the machine check to the guest.
174 css_sched_sch_todo(sch, SCH_TODO_UNREG);
175 private->state = VFIO_CCW_STATE_NOT_OPER;
179 * No operation action.
181 static void fsm_nop(struct vfio_ccw_private *private,
182 enum vfio_ccw_event event)
186 static void fsm_io_error(struct vfio_ccw_private *private,
187 enum vfio_ccw_event event)
189 pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
190 private->io_region->ret_code = -EIO;
193 static void fsm_io_busy(struct vfio_ccw_private *private,
194 enum vfio_ccw_event event)
196 private->io_region->ret_code = -EBUSY;
199 static void fsm_io_retry(struct vfio_ccw_private *private,
200 enum vfio_ccw_event event)
202 private->io_region->ret_code = -EAGAIN;
205 static void fsm_async_error(struct vfio_ccw_private *private,
206 enum vfio_ccw_event event)
208 struct ccw_cmd_region *cmd_region = private->cmd_region;
210 pr_err("vfio-ccw: FSM: %s request from state:%d\n",
211 cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
212 cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
213 "<unknown>", private->state);
214 cmd_region->ret_code = -EIO;
217 static void fsm_async_retry(struct vfio_ccw_private *private,
218 enum vfio_ccw_event event)
220 private->cmd_region->ret_code = -EAGAIN;
223 static void fsm_disabled_irq(struct vfio_ccw_private *private,
224 enum vfio_ccw_event event)
226 struct subchannel *sch = private->sch;
229 * An interrupt in a disabled state means a previous disable was not
230 * successful - should not happen, but we try to disable again.
232 cio_disable_subchannel(sch);
234 inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
236 return p->sch->schid;
240 * Deal with the ccw command request from the userspace.
242 static void fsm_io_request(struct vfio_ccw_private *private,
243 enum vfio_ccw_event event)
246 union scsw *scsw = &private->scsw;
247 struct ccw_io_region *io_region = private->io_region;
248 struct mdev_device *mdev = private->mdev;
249 char *errstr = "request";
250 struct subchannel_id schid = get_schid(private);
252 private->state = VFIO_CCW_STATE_CP_PROCESSING;
253 memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
255 if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
256 orb = (union orb *)io_region->orb_area;
258 /* Don't try to build a cp if transport mode is specified. */
260 io_region->ret_code = -EOPNOTSUPP;
261 VFIO_CCW_MSG_EVENT(2,
262 "%pUl (%x.%x.%04x): transport mode\n",
263 mdev_uuid(mdev), schid.cssid,
264 schid.ssid, schid.sch_no);
265 errstr = "transport mode";
268 io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
270 if (io_region->ret_code) {
271 VFIO_CCW_MSG_EVENT(2,
272 "%pUl (%x.%x.%04x): cp_init=%d\n",
273 mdev_uuid(mdev), schid.cssid,
274 schid.ssid, schid.sch_no,
275 io_region->ret_code);
280 io_region->ret_code = cp_prefetch(&private->cp);
281 if (io_region->ret_code) {
282 VFIO_CCW_MSG_EVENT(2,
283 "%pUl (%x.%x.%04x): cp_prefetch=%d\n",
284 mdev_uuid(mdev), schid.cssid,
285 schid.ssid, schid.sch_no,
286 io_region->ret_code);
287 errstr = "cp prefetch";
288 cp_free(&private->cp);
292 /* Start channel program and wait for I/O interrupt. */
293 io_region->ret_code = fsm_io_helper(private);
294 if (io_region->ret_code) {
295 VFIO_CCW_MSG_EVENT(2,
296 "%pUl (%x.%x.%04x): fsm_io_helper=%d\n",
297 mdev_uuid(mdev), schid.cssid,
298 schid.ssid, schid.sch_no,
299 io_region->ret_code);
300 errstr = "cp fsm_io_helper";
301 cp_free(&private->cp);
305 } else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
306 VFIO_CCW_MSG_EVENT(2,
307 "%pUl (%x.%x.%04x): halt on io_region\n",
308 mdev_uuid(mdev), schid.cssid,
309 schid.ssid, schid.sch_no);
310 /* halt is handled via the async cmd region */
311 io_region->ret_code = -EOPNOTSUPP;
313 } else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
314 VFIO_CCW_MSG_EVENT(2,
315 "%pUl (%x.%x.%04x): clear on io_region\n",
316 mdev_uuid(mdev), schid.cssid,
317 schid.ssid, schid.sch_no);
318 /* clear is handled via the async cmd region */
319 io_region->ret_code = -EOPNOTSUPP;
324 trace_vfio_ccw_io_fctl(scsw->cmd.fctl, schid,
325 io_region->ret_code, errstr);
329 * Deal with an async request from userspace.
331 static void fsm_async_request(struct vfio_ccw_private *private,
332 enum vfio_ccw_event event)
334 struct ccw_cmd_region *cmd_region = private->cmd_region;
336 switch (cmd_region->command) {
337 case VFIO_CCW_ASYNC_CMD_HSCH:
338 cmd_region->ret_code = fsm_do_halt(private);
340 case VFIO_CCW_ASYNC_CMD_CSCH:
341 cmd_region->ret_code = fsm_do_clear(private);
344 /* should not happen? */
345 cmd_region->ret_code = -EINVAL;
350 * Got an interrupt for a normal io (state busy).
352 static void fsm_irq(struct vfio_ccw_private *private,
353 enum vfio_ccw_event event)
355 struct irb *irb = this_cpu_ptr(&cio_irb);
357 VFIO_CCW_TRACE_EVENT(6, "IRQ");
358 VFIO_CCW_TRACE_EVENT(6, dev_name(&private->sch->dev));
360 memcpy(&private->irb, irb, sizeof(*irb));
362 queue_work(vfio_ccw_work_q, &private->io_work);
364 if (private->completion)
365 complete(private->completion);
369 * Device statemachine
371 fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
372 [VFIO_CCW_STATE_NOT_OPER] = {
373 [VFIO_CCW_EVENT_NOT_OPER] = fsm_nop,
374 [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
375 [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
376 [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
378 [VFIO_CCW_STATE_STANDBY] = {
379 [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
380 [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
381 [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
382 [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
384 [VFIO_CCW_STATE_IDLE] = {
385 [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
386 [VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
387 [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
388 [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
390 [VFIO_CCW_STATE_CP_PROCESSING] = {
391 [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
392 [VFIO_CCW_EVENT_IO_REQ] = fsm_io_retry,
393 [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_retry,
394 [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
396 [VFIO_CCW_STATE_CP_PENDING] = {
397 [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
398 [VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
399 [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
400 [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,