// SPDX-License-Identifier: GPL-2.0
/* viohs.c: LDOM Virtual I/O handshake helper layer.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>

#include <asm/ldc.h>
#include <asm/vio.h>
18 int vio_ldc_send(struct vio_driver_state *vio, void *data, int len)
20 int err, limit = 1000;
24 err = ldc_write(vio->lp, data, len);
25 if (!err || (err != -EAGAIN))
32 EXPORT_SYMBOL(vio_ldc_send);
34 static int send_ctrl(struct vio_driver_state *vio,
35 struct vio_msg_tag *tag, int len)
37 tag->sid = vio_send_sid(vio);
38 return vio_ldc_send(vio, tag, len);
41 static void init_tag(struct vio_msg_tag *tag, u8 type, u8 stype, u16 stype_env)
45 tag->stype_env = stype_env;
48 static int send_version(struct vio_driver_state *vio, u16 major, u16 minor)
50 struct vio_ver_info pkt;
52 vio->_local_sid = (u32) sched_clock();
54 memset(&pkt, 0, sizeof(pkt));
55 init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_VER_INFO);
58 pkt.dev_class = vio->dev_class;
60 viodbg(HS, "SEND VERSION INFO maj[%u] min[%u] devclass[%u]\n",
61 major, minor, vio->dev_class);
63 return send_ctrl(vio, &pkt.tag, sizeof(pkt));
66 static int start_handshake(struct vio_driver_state *vio)
70 viodbg(HS, "START HANDSHAKE\n");
72 vio->hs_state = VIO_HS_INVALID;
74 err = send_version(vio,
75 vio->ver_table[0].major,
76 vio->ver_table[0].minor);
83 static void flush_rx_dring(struct vio_driver_state *vio)
85 struct vio_dring_state *dr;
88 BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG));
90 dr = &vio->drings[VIO_DRIVER_RX_RING];
93 BUG_ON(!vio->desc_buf);
97 memset(dr, 0, sizeof(*dr));
101 void vio_link_state_change(struct vio_driver_state *vio, int event)
103 if (event == LDC_EVENT_UP) {
104 vio->hs_state = VIO_HS_INVALID;
106 switch (vio->dev_class) {
108 case VDEV_NETWORK_SWITCH:
109 vio->dr_state = (VIO_DR_STATE_TXREQ |
114 vio->dr_state = VIO_DR_STATE_TXREQ;
116 case VDEV_DISK_SERVER:
117 vio->dr_state = VIO_DR_STATE_RXREQ;
120 start_handshake(vio);
121 } else if (event == LDC_EVENT_RESET) {
122 vio->hs_state = VIO_HS_INVALID;
124 if (vio->dr_state & VIO_DR_STATE_RXREG)
127 vio->dr_state = 0x00;
128 memset(&vio->ver, 0, sizeof(vio->ver));
130 ldc_disconnect(vio->lp);
133 EXPORT_SYMBOL(vio_link_state_change);
135 static int handshake_failure(struct vio_driver_state *vio)
137 struct vio_dring_state *dr;
139 /* XXX Put policy here... Perhaps start a timer to fire
140 * XXX in 100 ms, which will bring the link up and retry
144 viodbg(HS, "HANDSHAKE FAILURE\n");
146 vio->dr_state &= ~(VIO_DR_STATE_TXREG |
149 dr = &vio->drings[VIO_DRIVER_RX_RING];
150 memset(dr, 0, sizeof(*dr));
152 kfree(vio->desc_buf);
153 vio->desc_buf = NULL;
154 vio->desc_buf_len = 0;
156 vio->hs_state = VIO_HS_INVALID;
161 static int process_unknown(struct vio_driver_state *vio, void *arg)
163 struct vio_msg_tag *pkt = arg;
165 viodbg(HS, "UNKNOWN CONTROL [%02x:%02x:%04x:%08x]\n",
166 pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
168 printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n",
169 vio->vdev->channel_id);
171 ldc_disconnect(vio->lp);
176 static int send_dreg(struct vio_driver_state *vio)
178 struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_TX_RING];
180 struct vio_dring_register pkt;
181 char all[sizeof(struct vio_dring_register) +
182 (sizeof(struct ldc_trans_cookie) *
183 VIO_MAX_RING_COOKIES)];
185 size_t bytes = sizeof(struct vio_dring_register) +
186 (sizeof(struct ldc_trans_cookie) *
190 if (WARN_ON(bytes > sizeof(u)))
193 memset(&u, 0, bytes);
194 init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
195 u.pkt.dring_ident = 0;
196 u.pkt.num_descr = dr->num_entries;
197 u.pkt.descr_size = dr->entry_size;
198 u.pkt.options = VIO_TX_DRING;
199 u.pkt.num_cookies = dr->ncookies;
201 viodbg(HS, "SEND DRING_REG INFO ndesc[%u] dsz[%u] opt[0x%x] "
203 u.pkt.num_descr, u.pkt.descr_size, u.pkt.options,
206 for (i = 0; i < dr->ncookies; i++) {
207 u.pkt.cookies[i] = dr->cookies[i];
209 viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
211 (unsigned long long) u.pkt.cookies[i].cookie_addr,
212 (unsigned long long) u.pkt.cookies[i].cookie_size);
215 return send_ctrl(vio, &u.pkt.tag, bytes);
218 static int send_rdx(struct vio_driver_state *vio)
222 memset(&pkt, 0, sizeof(pkt));
224 init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX);
226 viodbg(HS, "SEND RDX INFO\n");
228 return send_ctrl(vio, &pkt.tag, sizeof(pkt));
231 static int send_attr(struct vio_driver_state *vio)
236 return vio->ops->send_attr(vio);
239 static struct vio_version *find_by_major(struct vio_driver_state *vio,
242 struct vio_version *ret = NULL;
245 for (i = 0; i < vio->ver_table_entries; i++) {
246 struct vio_version *v = &vio->ver_table[i];
247 if (v->major <= major) {
255 static int process_ver_info(struct vio_driver_state *vio,
256 struct vio_ver_info *pkt)
258 struct vio_version *vap;
261 viodbg(HS, "GOT VERSION INFO maj[%u] min[%u] devclass[%u]\n",
262 pkt->major, pkt->minor, pkt->dev_class);
264 if (vio->hs_state != VIO_HS_INVALID) {
265 /* XXX Perhaps invoke start_handshake? XXX */
266 memset(&vio->ver, 0, sizeof(vio->ver));
267 vio->hs_state = VIO_HS_INVALID;
270 vap = find_by_major(vio, pkt->major);
272 vio->_peer_sid = pkt->tag.sid;
275 pkt->tag.stype = VIO_SUBTYPE_NACK;
278 viodbg(HS, "SEND VERSION NACK maj[0] min[0]\n");
279 err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
280 } else if (vap->major != pkt->major) {
281 pkt->tag.stype = VIO_SUBTYPE_NACK;
282 pkt->major = vap->major;
283 pkt->minor = vap->minor;
284 viodbg(HS, "SEND VERSION NACK maj[%u] min[%u]\n",
285 pkt->major, pkt->minor);
286 err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
288 struct vio_version ver = {
292 if (ver.minor > vap->minor)
293 ver.minor = vap->minor;
294 pkt->minor = ver.minor;
295 pkt->tag.stype = VIO_SUBTYPE_ACK;
296 pkt->dev_class = vio->dev_class;
297 viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
298 pkt->major, pkt->minor);
299 err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
302 vio->hs_state = VIO_HS_GOTVERS;
306 return handshake_failure(vio);
311 static int process_ver_ack(struct vio_driver_state *vio,
312 struct vio_ver_info *pkt)
314 viodbg(HS, "GOT VERSION ACK maj[%u] min[%u] devclass[%u]\n",
315 pkt->major, pkt->minor, pkt->dev_class);
317 if (vio->hs_state & VIO_HS_GOTVERS) {
318 if (vio->ver.major != pkt->major ||
319 vio->ver.minor != pkt->minor) {
320 pkt->tag.stype = VIO_SUBTYPE_NACK;
321 (void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
322 return handshake_failure(vio);
325 vio->ver.major = pkt->major;
326 vio->ver.minor = pkt->minor;
327 vio->hs_state = VIO_HS_GOTVERS;
330 switch (vio->dev_class) {
333 if (send_attr(vio) < 0)
334 return handshake_failure(vio);
344 static int process_ver_nack(struct vio_driver_state *vio,
345 struct vio_ver_info *pkt)
347 struct vio_version *nver;
349 viodbg(HS, "GOT VERSION NACK maj[%u] min[%u] devclass[%u]\n",
350 pkt->major, pkt->minor, pkt->dev_class);
352 if (pkt->major == 0 && pkt->minor == 0)
353 return handshake_failure(vio);
354 nver = find_by_major(vio, pkt->major);
356 return handshake_failure(vio);
358 if (send_version(vio, nver->major, nver->minor) < 0)
359 return handshake_failure(vio);
364 static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
366 switch (pkt->tag.stype) {
367 case VIO_SUBTYPE_INFO:
368 return process_ver_info(vio, pkt);
370 case VIO_SUBTYPE_ACK:
371 return process_ver_ack(vio, pkt);
373 case VIO_SUBTYPE_NACK:
374 return process_ver_nack(vio, pkt);
377 return handshake_failure(vio);
381 static int process_attr(struct vio_driver_state *vio, void *pkt)
385 if (!(vio->hs_state & VIO_HS_GOTVERS))
386 return handshake_failure(vio);
391 err = vio->ops->handle_attr(vio, pkt);
393 return handshake_failure(vio);
395 vio->hs_state |= VIO_HS_GOT_ATTR;
397 if ((vio->dr_state & VIO_DR_STATE_TXREQ) &&
398 !(vio->hs_state & VIO_HS_SENT_DREG)) {
399 if (send_dreg(vio) < 0)
400 return handshake_failure(vio);
402 vio->hs_state |= VIO_HS_SENT_DREG;
409 static int all_drings_registered(struct vio_driver_state *vio)
411 int need_rx, need_tx;
413 need_rx = (vio->dr_state & VIO_DR_STATE_RXREQ);
414 need_tx = (vio->dr_state & VIO_DR_STATE_TXREQ);
417 !(vio->dr_state & VIO_DR_STATE_RXREG))
421 !(vio->dr_state & VIO_DR_STATE_TXREG))
427 static int process_dreg_info(struct vio_driver_state *vio,
428 struct vio_dring_register *pkt)
430 struct vio_dring_state *dr;
433 viodbg(HS, "GOT DRING_REG INFO ident[%llx] "
434 "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
435 (unsigned long long) pkt->dring_ident,
436 pkt->num_descr, pkt->descr_size, pkt->options,
439 if (!(vio->dr_state & VIO_DR_STATE_RXREQ))
442 if (vio->dr_state & VIO_DR_STATE_RXREG)
445 /* v1.6 and higher, ACK with desired, supported mode, or NACK */
446 if (vio_version_after_eq(vio, 1, 6)) {
447 if (!(pkt->options & VIO_TX_DRING))
449 pkt->options = VIO_TX_DRING;
452 BUG_ON(vio->desc_buf);
454 vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
458 vio->desc_buf_len = pkt->descr_size;
460 dr = &vio->drings[VIO_DRIVER_RX_RING];
462 dr->num_entries = pkt->num_descr;
463 dr->entry_size = pkt->descr_size;
464 dr->ncookies = pkt->num_cookies;
465 for (i = 0; i < dr->ncookies; i++) {
466 dr->cookies[i] = pkt->cookies[i];
468 viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
471 pkt->cookies[i].cookie_addr,
473 pkt->cookies[i].cookie_size);
476 pkt->tag.stype = VIO_SUBTYPE_ACK;
477 pkt->dring_ident = ++dr->ident;
479 viodbg(HS, "SEND DRING_REG ACK ident[%llx] "
480 "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
481 (unsigned long long) pkt->dring_ident,
482 pkt->num_descr, pkt->descr_size, pkt->options,
485 len = (sizeof(*pkt) +
486 (dr->ncookies * sizeof(struct ldc_trans_cookie)));
487 if (send_ctrl(vio, &pkt->tag, len) < 0)
490 vio->dr_state |= VIO_DR_STATE_RXREG;
495 pkt->tag.stype = VIO_SUBTYPE_NACK;
496 viodbg(HS, "SEND DRING_REG NACK\n");
497 (void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
499 return handshake_failure(vio);
502 static int process_dreg_ack(struct vio_driver_state *vio,
503 struct vio_dring_register *pkt)
505 struct vio_dring_state *dr;
507 viodbg(HS, "GOT DRING_REG ACK ident[%llx] "
508 "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
509 (unsigned long long) pkt->dring_ident,
510 pkt->num_descr, pkt->descr_size, pkt->options,
513 dr = &vio->drings[VIO_DRIVER_TX_RING];
515 if (!(vio->dr_state & VIO_DR_STATE_TXREQ))
516 return handshake_failure(vio);
518 dr->ident = pkt->dring_ident;
519 vio->dr_state |= VIO_DR_STATE_TXREG;
521 if (all_drings_registered(vio)) {
522 if (send_rdx(vio) < 0)
523 return handshake_failure(vio);
524 vio->hs_state = VIO_HS_SENT_RDX;
529 static int process_dreg_nack(struct vio_driver_state *vio,
530 struct vio_dring_register *pkt)
532 viodbg(HS, "GOT DRING_REG NACK ident[%llx] "
533 "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
534 (unsigned long long) pkt->dring_ident,
535 pkt->num_descr, pkt->descr_size, pkt->options,
538 return handshake_failure(vio);
541 static int process_dreg(struct vio_driver_state *vio,
542 struct vio_dring_register *pkt)
544 if (!(vio->hs_state & VIO_HS_GOTVERS))
545 return handshake_failure(vio);
547 switch (pkt->tag.stype) {
548 case VIO_SUBTYPE_INFO:
549 return process_dreg_info(vio, pkt);
551 case VIO_SUBTYPE_ACK:
552 return process_dreg_ack(vio, pkt);
554 case VIO_SUBTYPE_NACK:
555 return process_dreg_nack(vio, pkt);
558 return handshake_failure(vio);
562 static int process_dunreg(struct vio_driver_state *vio,
563 struct vio_dring_unregister *pkt)
565 struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_RX_RING];
567 viodbg(HS, "GOT DRING_UNREG\n");
569 if (pkt->dring_ident != dr->ident)
572 vio->dr_state &= ~VIO_DR_STATE_RXREG;
574 memset(dr, 0, sizeof(*dr));
576 kfree(vio->desc_buf);
577 vio->desc_buf = NULL;
578 vio->desc_buf_len = 0;
583 static int process_rdx_info(struct vio_driver_state *vio, struct vio_rdx *pkt)
585 viodbg(HS, "GOT RDX INFO\n");
587 pkt->tag.stype = VIO_SUBTYPE_ACK;
588 viodbg(HS, "SEND RDX ACK\n");
589 if (send_ctrl(vio, &pkt->tag, sizeof(*pkt)) < 0)
590 return handshake_failure(vio);
592 vio->hs_state |= VIO_HS_SENT_RDX_ACK;
596 static int process_rdx_ack(struct vio_driver_state *vio, struct vio_rdx *pkt)
598 viodbg(HS, "GOT RDX ACK\n");
600 if (!(vio->hs_state & VIO_HS_SENT_RDX))
601 return handshake_failure(vio);
603 vio->hs_state |= VIO_HS_GOT_RDX_ACK;
607 static int process_rdx_nack(struct vio_driver_state *vio, struct vio_rdx *pkt)
609 viodbg(HS, "GOT RDX NACK\n");
611 return handshake_failure(vio);
614 static int process_rdx(struct vio_driver_state *vio, struct vio_rdx *pkt)
616 if (!all_drings_registered(vio))
617 handshake_failure(vio);
619 switch (pkt->tag.stype) {
620 case VIO_SUBTYPE_INFO:
621 return process_rdx_info(vio, pkt);
623 case VIO_SUBTYPE_ACK:
624 return process_rdx_ack(vio, pkt);
626 case VIO_SUBTYPE_NACK:
627 return process_rdx_nack(vio, pkt);
630 return handshake_failure(vio);
634 int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
636 struct vio_msg_tag *tag = pkt;
637 u8 prev_state = vio->hs_state;
640 switch (tag->stype_env) {
642 err = process_ver(vio, pkt);
646 err = process_attr(vio, pkt);
650 err = process_dreg(vio, pkt);
653 case VIO_DRING_UNREG:
654 err = process_dunreg(vio, pkt);
658 err = process_rdx(vio, pkt);
662 err = process_unknown(vio, pkt);
667 vio->hs_state != prev_state &&
668 (vio->hs_state & VIO_HS_COMPLETE)) {
670 vio->ops->handshake_complete(vio);
675 EXPORT_SYMBOL(vio_control_pkt_engine);
/* Connection-reset hook; intentionally a no-op at this layer. */
void vio_conn_reset(struct vio_driver_state *vio)
{
}
EXPORT_SYMBOL(vio_conn_reset);
/* The issue is that the Solaris virtual disk server just mirrors the
 * SID values it gets from the client peer.  So we work around that
 * here in vio_{validate,send}_sid() so that the drivers don't need
 * to be aware of this crap.
 */
687 int vio_validate_sid(struct vio_driver_state *vio, struct vio_msg_tag *tp)
691 /* Always let VERSION+INFO packets through unchecked, they
692 * define the new SID.
694 if (tp->type == VIO_TYPE_CTRL &&
695 tp->stype == VIO_SUBTYPE_INFO &&
696 tp->stype_env == VIO_VER_INFO)
699 /* Ok, now figure out which SID to use. */
700 switch (vio->dev_class) {
702 case VDEV_NETWORK_SWITCH:
703 case VDEV_DISK_SERVER:
705 sid = vio->_peer_sid;
709 sid = vio->_local_sid;
715 viodbg(DATA, "BAD SID tag->sid[%08x] peer_sid[%08x] local_sid[%08x]\n",
716 tp->sid, vio->_peer_sid, vio->_local_sid);
719 EXPORT_SYMBOL(vio_validate_sid);
721 u32 vio_send_sid(struct vio_driver_state *vio)
723 switch (vio->dev_class) {
725 case VDEV_NETWORK_SWITCH:
728 return vio->_local_sid;
730 case VDEV_DISK_SERVER:
731 return vio->_peer_sid;
734 EXPORT_SYMBOL(vio_send_sid);
736 int vio_ldc_alloc(struct vio_driver_state *vio,
737 struct ldc_channel_config *base_cfg,
740 struct ldc_channel_config cfg = *base_cfg;
741 struct ldc_channel *lp;
743 cfg.tx_irq = vio->vdev->tx_irq;
744 cfg.rx_irq = vio->vdev->rx_irq;
746 lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name);
754 EXPORT_SYMBOL(vio_ldc_alloc);
756 void vio_ldc_free(struct vio_driver_state *vio)
761 kfree(vio->desc_buf);
762 vio->desc_buf = NULL;
763 vio->desc_buf_len = 0;
765 EXPORT_SYMBOL(vio_ldc_free);
767 void vio_port_up(struct vio_driver_state *vio)
772 spin_lock_irqsave(&vio->lock, flags);
774 state = ldc_state(vio->lp);
777 if (state == LDC_STATE_INIT) {
778 err = ldc_bind(vio->lp);
780 printk(KERN_WARNING "%s: Port %lu bind failed, "
782 vio->name, vio->vdev->channel_id, err);
786 if (ldc_mode(vio->lp) == LDC_MODE_RAW)
787 ldc_set_state(vio->lp, LDC_STATE_CONNECTED);
789 err = ldc_connect(vio->lp);
792 printk(KERN_WARNING "%s: Port %lu connect failed, "
794 vio->name, vio->vdev->channel_id, err);
797 unsigned long expires = jiffies + HZ;
799 expires = round_jiffies(expires);
800 mod_timer(&vio->timer, expires);
803 spin_unlock_irqrestore(&vio->lock, flags);
805 EXPORT_SYMBOL(vio_port_up);
807 static void vio_port_timer(struct timer_list *t)
809 struct vio_driver_state *vio = from_timer(vio, t, timer);
814 int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
815 u8 dev_class, struct vio_version *ver_table,
816 int ver_table_size, struct vio_driver_ops *ops,
821 case VDEV_NETWORK_SWITCH:
823 case VDEV_DISK_SERVER:
824 case VDEV_CONSOLE_CON:
831 if (dev_class == VDEV_NETWORK ||
832 dev_class == VDEV_NETWORK_SWITCH ||
833 dev_class == VDEV_DISK ||
834 dev_class == VDEV_DISK_SERVER) {
835 if (!ops || !ops->send_attr || !ops->handle_attr ||
836 !ops->handshake_complete)
840 if (!ver_table || ver_table_size < 0)
846 spin_lock_init(&vio->lock);
850 vio->dev_class = dev_class;
853 vio->ver_table = ver_table;
854 vio->ver_table_entries = ver_table_size;
858 timer_setup(&vio->timer, vio_port_timer, 0);
862 EXPORT_SYMBOL(vio_driver_init);