// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"
/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
        struct list_head tunnel_list;
        struct list_head dp_resources;
        bool hotplug_active;
};
struct tb_hotplug_event {
        struct work_struct work;
        struct tb *tb;
        u64 route;
        u8 port;
        bool unplug;
};
static void tb_handle_hotplug(struct work_struct *work);
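/*
 * tb_queue_hotplug() - defer a plug/unplug event to the domain workqueue
 *
 * Allocates a tb_hotplug_event and queues tb_handle_hotplug() on tb->wq
 * so the event is processed in process context where tb->lock can be
 * taken. If the allocation fails the event is silently dropped.
 */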
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
        struct tb_hotplug_event *ev;

        ev = kmalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return;

        ev->tb = tb;
        ev->route = route;
        ev->port = port;
        ev->unplug = unplug;
        INIT_WORK(&ev->work, tb_handle_hotplug);
        queue_work(tb->wq, &ev->work);
}
/* enumeration & hot plug handling */
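/*
 * tb_add_dp_resources() - make DP IN adapters of @sw available for tunneling
 *
 * Walks the ports of @sw and adds each DP IN adapter whose resource the
 * switch reports as available (tb_switch_query_dp_resource()) to the
 * connection manager dp_resources list used by tb_tunnel_dp().
 */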
static void tb_add_dp_resources(struct tb_switch *sw)
{
        struct tb_cm *tcm = tb_priv(sw->tb);
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_is_dpin(port))
                        continue;
                if (!tb_switch_query_dp_resource(sw, port))
                        continue;

                list_add_tail(&port->list, &tcm->dp_resources);
                tb_port_dbg(port, "DP IN resource available\n");
        }
}
static void tb_remove_dp_resources(struct tb_switch *sw)
{
        struct tb_cm *tcm = tb_priv(sw->tb);
        struct tb_port *port, *tmp;

        /* Clear children resources first */
        tb_switch_for_each_port(sw, port) {
                if (tb_port_has_remote(port))
                        tb_remove_dp_resources(port->remote->sw);
        }

        list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
                if (port->sw == sw) {
                        tb_port_dbg(port, "DP OUT resource unavailable\n");
                        list_del_init(&port->list);
                }
        }
}
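/*
 * tb_discover_tunnels() - pick up tunnels created by the boot firmware
 *
 * Recursively walks the switch tree and, for each DP IN or PCIe down
 * adapter, asks the tunnel code whether an already programmed tunnel
 * starts there. Discovered tunnels are added to the tunnel list so the
 * connection manager can account for and later tear them down. Switches
 * along a discovered PCIe tunnel are marked as boot configured.
 */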
static void tb_discover_tunnels(struct tb_switch *sw)
{
        struct tb *tb = sw->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                struct tb_tunnel *tunnel = NULL;

                switch (port->config.type) {
                case TB_TYPE_DP_HDMI_IN:
                        tunnel = tb_tunnel_discover_dp(tb, port);
                        break;

                case TB_TYPE_PCIE_DOWN:
                        tunnel = tb_tunnel_discover_pci(tb, port);
                        break;

                default:
                        break;
                }

                if (!tunnel)
                        continue;

                if (tb_tunnel_is_pci(tunnel)) {
                        struct tb_switch *parent = tunnel->dst_port->sw;

                        while (parent != tunnel->src_port->sw) {
                                parent->boot = true;
                                parent = tb_switch_parent(parent);
                        }
                }

                list_add_tail(&tunnel->list, &tcm->tunnel_list);
        }

        tb_switch_for_each_port(sw, port) {
                if (tb_port_has_remote(port))
                        tb_discover_tunnels(port->remote->sw);
        }
}
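/*
 * tb_scan_xdomain() - set up an XDomain connection behind @port
 *
 * Called when the device behind @port could not be enumerated as a
 * switch, which typically means another Thunderbolt domain (host) is
 * connected there. Allocates and registers a tb_xdomain for it unless
 * one already exists for the same route.
 */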
static void tb_scan_xdomain(struct tb_port *port)
{
        struct tb_switch *sw = port->sw;
        struct tb *tb = sw->tb;
        struct tb_xdomain *xd;
        u64 route;

        route = tb_downstream_route(port);
        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                tb_xdomain_put(xd);
                return;
        }

        xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
                              NULL);
        if (xd) {
                tb_port_at(route, sw)->xdomain = xd;
                tb_xdomain_add(xd);
        }
}
static void tb_scan_port(struct tb_port *port);
/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port)
                tb_scan_port(port);
}
/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
        struct tb_cm *tcm = tb_priv(port->sw->tb);
        struct tb_port *upstream_port;
        struct tb_switch *sw;

        if (tb_is_upstream_port(port))
                return;

        if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
            !tb_dp_port_is_enabled(port)) {
                tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
                tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
                                 false);
                return;
        }

        if (port->config.type != TB_TYPE_PORT)
                return;
        if (port->dual_link_port && port->link_nr)
                return; /*
                         * Downstream switch is reachable through two ports.
                         * Only scan on the primary port (link_nr == 0).
                         */
        if (tb_wait_for_port(port, false) <= 0)
                return;
        if (port->remote) {
                tb_port_dbg(port, "port already has a remote\n");
                return;
        }
        sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
                             tb_downstream_route(port));
        if (IS_ERR(sw)) {
                /*
                 * If there is an error accessing the connected switch
                 * it may be connected to another domain. Also we allow
                 * the other domain to be connected to a max depth switch.
                 */
                if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
                        tb_scan_xdomain(port);
                return;
        }

        if (tb_switch_configure(sw)) {
                tb_switch_put(sw);
                return;
        }

        /*
         * If there was previously another domain connected remove it
         * first.
         */
        if (port->xdomain) {
                tb_xdomain_remove(port->xdomain);
                port->xdomain = NULL;
        }

        /*
         * Do not send uevents until we have discovered all existing
         * tunnels and know which switches were authorized already by
         * the boot firmware.
         */
        if (!tcm->hotplug_active)
                dev_set_uevent_suppress(&sw->dev, true);

        if (tb_switch_add(sw)) {
                tb_switch_put(sw);
                return;
        }

        /* Link the switches using both links if available */
        upstream_port = tb_upstream_port(sw);
        port->remote = upstream_port;
        upstream_port->remote = port;
        if (port->dual_link_port && upstream_port->dual_link_port) {
                port->dual_link_port->remote = upstream_port->dual_link_port;
                upstream_port->dual_link_port->remote = port->dual_link_port;
        }

        /* Enable lane bonding if supported */
        if (tb_switch_lane_bonding_enable(sw))
                tb_sw_warn(sw, "failed to enable lane bonding\n");

        tb_scan_switch(sw);
}
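/*
 * tb_find_tunnel() - find an existing tunnel of @type from the tunnel list
 *
 * Matches on the source and/or destination adapter; passing NULL for one
 * of them means "don't care". Returns NULL if no such tunnel exists.
 */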
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
                                        struct tb_port *src_port,
                                        struct tb_port *dst_port)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;

        list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
                if (tunnel->type == type &&
                    ((src_port && src_port == tunnel->src_port) ||
                     (dst_port && dst_port == tunnel->dst_port))) {
                        return tunnel;
                }
        }

        return NULL;
}
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
        if (!tunnel)
                return;

        tb_tunnel_deactivate(tunnel);
        list_del(&tunnel->list);

        /*
         * In case of DP tunnel make sure the DP IN resource is deallocated
         * properly.
         */
        if (tb_tunnel_is_dp(tunnel)) {
                struct tb_port *in = tunnel->src_port;

                tb_switch_dealloc_dp_resource(in->sw, in);
        }

        tb_tunnel_free(tunnel);
}
/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;
        struct tb_tunnel *n;

        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                if (tb_tunnel_is_invalid(tunnel))
                        tb_deactivate_and_free_tunnel(tunnel);
        }
}
/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_has_remote(port))
                        continue;

                if (port->remote->sw->is_unplugged) {
                        tb_remove_dp_resources(port->remote->sw);
                        tb_switch_lane_bonding_disable(port->remote->sw);
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                        if (port->dual_link_port)
                                port->dual_link_port->remote = NULL;
                } else {
                        tb_free_unplugged_children(port->remote->sw);
                }
        }
}
/**
 * tb_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
static struct tb_port *tb_find_port(struct tb_switch *sw,
                                    enum tb_port_type type)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (port->config.type == type)
                        return port;
        }

        return NULL;
}
/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
                                           enum tb_port_type type)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (tb_is_upstream_port(port))
                        continue;
                if (port->config.type != type)
                        continue;
                if (tb_port_is_enabled(port))
                        continue;
                return port;
        }
        return NULL;
}
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
                                         const struct tb_port *port)
{
        /*
         * To keep plugging devices consistently in the same PCIe
         * hierarchy, do mapping here for root switch downstream PCIe
         * ports.
         */
        if (!tb_route(sw)) {
                int phy_port = tb_phy_port_from_link(port->port);
                int index;

                /*
                 * Hard-coded Thunderbolt port to PCIe down port mapping
                 * per controller.
                 */
                if (tb_switch_is_cactus_ridge(sw) ||
                    tb_switch_is_alpine_ridge(sw))
                        index = !phy_port ? 6 : 7;
                else if (tb_switch_is_falcon_ridge(sw))
                        index = !phy_port ? 6 : 8;
                else if (tb_switch_is_titan_ridge(sw))
                        index = !phy_port ? 8 : 9;
                else
                        goto out;

                /* Validate the hard-coding */
                if (WARN_ON(index > sw->config.max_port_number))
                        goto out;
                if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
                        goto out;
                if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
                        goto out;

                return &sw->ports[index];
        }

out:
        return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
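/*
 * tb_available_bw() - estimate bandwidth available for a new DP tunnel
 *
 * Walks the chain of switches from the DP OUT adapter up towards the DP
 * IN adapter. For each hop the link bandwidth (link_speed * link_width,
 * minus a 10% guard band) is reduced by the bandwidth already consumed
 * by tunnels passing through that switch, and the minimum over all hops
 * (capped at 40000 Mb/s) is returned. A negative value indicates an
 * error from tb_tunnel_consumed_bandwidth().
 */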
static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
                           struct tb_port *out)
{
        struct tb_switch *sw = out->sw;
        struct tb_tunnel *tunnel;
        int bw, available_bw = 40000;

        while (sw && sw != in->sw) {
                bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
                /* Leave 10% guard band */
                bw -= bw / 10;

                /*
                 * Check for any active DP tunnels that go through this
                 * switch and reduce their consumed bandwidth from
                 * available.
                 */
                list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
                        int consumed_bw;

                        if (!tb_tunnel_switch_on_path(tunnel, sw))
                                continue;

                        consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
                        if (consumed_bw < 0)
                                return consumed_bw;

                        bw -= consumed_bw;
                }

                if (bw < available_bw)
                        available_bw = bw;

                sw = tb_switch_parent(sw);
        }

        return available_bw;
}
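/*
 * tb_tunnel_dp() - establish a DP tunnel if a free IN/OUT pair exists
 *
 * Picks the first inactive DP IN and DP OUT adapters from the
 * dp_resources list, reserves the DP IN resource, estimates the
 * available bandwidth and then allocates and activates the tunnel.
 * On failure after the reservation the DP IN resource is released again.
 */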
static void tb_tunnel_dp(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *port, *in, *out;
        struct tb_tunnel *tunnel;
        int available_bw;

        /*
         * Find pair of inactive DP IN and DP OUT adapters and then
         * establish a DP tunnel between them.
         */
        tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

        in = NULL;
        out = NULL;
        list_for_each_entry(port, &tcm->dp_resources, list) {
                if (tb_port_is_enabled(port)) {
                        tb_port_dbg(port, "in use\n");
                        continue;
                }

                tb_port_dbg(port, "available\n");

                if (!in && tb_port_is_dpin(port))
                        in = port;
                else if (!out && tb_port_is_dpout(port))
                        out = port;
        }

        if (!in) {
                tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
                return;
        }
        if (!out) {
                tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
                return;
        }

        if (tb_switch_alloc_dp_resource(in->sw, in)) {
                tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
                return;
        }

        /* Calculate available bandwidth between in and out */
        available_bw = tb_available_bw(tcm, in, out);
        if (available_bw < 0) {
                tb_warn(tb, "failed to determine available bandwidth\n");
                goto dealloc_dp;
        }

        tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
               available_bw);

        tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
        if (!tunnel) {
                tb_port_dbg(out, "could not allocate DP tunnel\n");
                goto dealloc_dp;
        }

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(out, "DP tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                goto dealloc_dp;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return;

dealloc_dp:
        tb_switch_dealloc_dp_resource(in->sw, in);
}
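/*
 * tb_dp_resource_unavailable() - handle a DP adapter going away
 *
 * Tears down the DP tunnel using @port (if any), drops the port from the
 * dp_resources list and then tries to set up a new tunnel from the
 * remaining resources.
 */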
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
        struct tb_port *in, *out;
        struct tb_tunnel *tunnel;

        if (tb_port_is_dpin(port)) {
                tb_port_dbg(port, "DP IN resource unavailable\n");
                in = port;
                out = NULL;
        } else {
                tb_port_dbg(port, "DP OUT resource unavailable\n");
                in = NULL;
                out = port;
        }

        tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
        tb_deactivate_and_free_tunnel(tunnel);
        list_del_init(&port->list);

        /*
         * See if there is another DP OUT port that can be used to
         * create another tunnel.
         */
        tb_tunnel_dp(tb);
}
static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *p;

        if (tb_port_is_enabled(port))
                return;

        list_for_each_entry(p, &tcm->dp_resources, list) {
                if (p == port)
                        return;
        }

        tb_port_dbg(port, "DP %s resource available\n",
                    tb_port_is_dpin(port) ? "IN" : "OUT");
        list_add_tail(&port->list, &tcm->dp_resources);

        /* Look for suitable DP IN <-> DP OUT pairs now */
        tb_tunnel_dp(tb);
}
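/*
 * tb_tunnel_pci() - tunnel PCIe towards the newly authorized switch @sw
 *
 * Used as the ->approve_switch callback: finds the PCIe up adapter on
 * @sw and a matching unused PCIe down adapter on its parent, then
 * allocates and activates a tunnel between them.
 */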
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
        struct tb_port *up, *down, *port;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *parent_sw;
        struct tb_tunnel *tunnel;

        up = tb_find_port(sw, TB_TYPE_PCIE_UP);
        if (!up)
                return 0;

        /*
         * Look up available down port. Since we are chaining it should
         * be found right above this switch.
         */
        parent_sw = tb_to_switch(sw->dev.parent);
        port = tb_port_at(tb_route(sw), parent_sw);
        down = tb_find_pcie_down(parent_sw, port);
        if (!down)
                return 0;

        tunnel = tb_tunnel_alloc_pci(tb, up, down);
        if (!tunnel)
                return -ENOMEM;

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(up,
                             "PCIe tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return 0;
}
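/*
 * tb_approve_xdomain_paths() - enable DMA paths towards an XDomain
 *
 * Used as the ->approve_xdomain_paths callback. Sets up a DMA tunnel
 * between the host NHI adapter and the adapter facing the remote domain
 * using the rings and paths negotiated in @xd, holding tb->lock while
 * the tunnel list is updated.
 */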
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *nhi_port, *dst_port;
        struct tb_tunnel *tunnel;
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        dst_port = tb_port_at(xd->route, sw);
        nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);

        mutex_lock(&tb->lock);
        tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
                                     xd->transmit_path, xd->receive_ring,
                                     xd->receive_path);
        if (!tunnel) {
                mutex_unlock(&tb->lock);
                return -ENOMEM;
        }

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(nhi_port,
                             "DMA tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                mutex_unlock(&tb->lock);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        mutex_unlock(&tb->lock);
        return 0;
}
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct tb_port *dst_port;
        struct tb_tunnel *tunnel;
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        dst_port = tb_port_at(xd->route, sw);

        /*
         * It is possible that the tunnel was already torn down (in
         * case of cable disconnect) so it is fine if we cannot find it
         * here anymore.
         */
        tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
        tb_deactivate_and_free_tunnel(tunnel);
}
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        if (!xd->is_unplugged) {
                mutex_lock(&tb->lock);
                __tb_disconnect_xdomain_paths(tb, xd);
                mutex_unlock(&tb->lock);
        }

        return 0;
}
/* hotplug handling */
/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
        struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
        struct tb *tb = ev->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *sw;
        struct tb_port *port;

        mutex_lock(&tb->lock);
        if (!tcm->hotplug_active)
                goto out; /* during init, suspend or shutdown */

        sw = tb_switch_find_by_route(tb, ev->route);
        if (!sw) {
                tb_warn(tb,
                        "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto out;
        }
        if (ev->port > sw->config.max_port_number) {
                tb_warn(tb,
                        "hotplug event from non existent port %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto put_sw;
        }
        port = &sw->ports[ev->port];
        if (tb_is_upstream_port(port)) {
                tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
                       ev->route, ev->port, ev->unplug);
                goto put_sw;
        }
        if (ev->unplug) {
                if (tb_port_has_remote(port)) {
                        tb_port_dbg(port, "switch unplugged\n");
                        tb_sw_set_unplugged(port->remote->sw);
                        tb_free_invalid_tunnels(tb);
                        tb_remove_dp_resources(port->remote->sw);
                        tb_switch_lane_bonding_disable(port->remote->sw);
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                        if (port->dual_link_port)
                                port->dual_link_port->remote = NULL;
                        /* Maybe we can create another DP tunnel */
                        tb_tunnel_dp(tb);
                } else if (port->xdomain) {
                        struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

                        tb_port_dbg(port, "xdomain unplugged\n");
                        /*
                         * Service drivers are unbound during
                         * tb_xdomain_remove() so setting XDomain as
                         * unplugged here prevents deadlock if they call
                         * tb_xdomain_disable_paths(). We will tear down
                         * the path below.
                         */
                        xd->is_unplugged = true;
                        tb_xdomain_remove(xd);
                        port->xdomain = NULL;
                        __tb_disconnect_xdomain_paths(tb, xd);
                        tb_xdomain_put(xd);
                } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
                        tb_dp_resource_unavailable(tb, port);
                } else {
                        tb_port_dbg(port,
                                    "got unplug event for disconnected port, ignoring\n");
                }
        } else if (port->remote) {
                tb_port_dbg(port, "got plug event for connected port, ignoring\n");
        } else {
                if (tb_port_is_null(port)) {
                        tb_port_dbg(port, "hotplug: scanning\n");
                        tb_scan_port(port);
                        if (!port->remote)
                                tb_port_dbg(port, "hotplug: no switch found\n");
                } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
                        tb_dp_resource_available(tb, port);
                }
        }

put_sw:
        tb_switch_put(sw);
out:
        mutex_unlock(&tb->lock);

        kfree(ev);
}
/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
                            const void *buf, size_t size)
{
        const struct cfg_event_pkg *pkg = buf;
        u64 route;

        if (type != TB_CFG_PKG_EVENT) {
                tb_warn(tb, "unexpected event %#x, ignoring\n", type);
                return;
        }

        route = tb_cfg_get_route(&pkg->header);

        if (tb_cfg_error(tb->ctl, route, pkg->port,
                         TB_CFG_ERROR_ACK_PLUG_EVENT)) {
                tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
                        pkg->port);
        }

        tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}
static void tb_stop(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;
        struct tb_tunnel *n;

        /* tunnels are only present after everything has been initialized */
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                /*
                 * DMA tunnels require the driver to be functional so we
                 * tear them down. Other protocol tunnels can be left
                 * intact.
                 */
                if (tb_tunnel_is_dma(tunnel))
                        tb_tunnel_deactivate(tunnel);
                tb_tunnel_free(tunnel);
        }
        tb_switch_remove(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
        if (tb_is_switch(dev)) {
                struct tb_switch *sw = tb_to_switch(dev);

                /*
                 * If we found that the switch was already setup by the
                 * boot firmware, mark it as authorized now before we
                 * send uevent to userspace.
                 */
                if (sw->boot)
                        sw->authorized = 1;

                dev_set_uevent_suppress(dev, false);
                kobject_uevent(&dev->kobj, KOBJ_ADD);
                device_for_each_child(dev, NULL, tb_scan_finalize_switch);
        }

        return 0;
}
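/*
 * tb_start() - start the software connection manager
 *
 * Allocates and registers the root switch, scans for devices connected
 * before the driver was loaded, discovers tunnels set up by the boot
 * firmware and finally enables hotplug event handling.
 */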
static int tb_start(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        int ret;

        tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
        if (IS_ERR(tb->root_switch))
                return PTR_ERR(tb->root_switch);

        /*
         * ICM firmware upgrade needs running firmware and in native
         * mode that is not available so disable firmware upgrade of the
         * root switch.
         */
        tb->root_switch->no_nvm_upgrade = true;

        ret = tb_switch_configure(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Announce the switch to the world */
        ret = tb_switch_add(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Full scan to discover devices added before the driver was loaded. */
        tb_scan_switch(tb->root_switch);
        /* Find out tunnels created by the boot firmware */
        tb_discover_tunnels(tb->root_switch);
        /* Add DP IN resources for the root switch */
        tb_add_dp_resources(tb->root_switch);
        /* Make the discovered switches available to the userspace */
        device_for_each_child(&tb->root_switch->dev, NULL,
                              tb_scan_finalize_switch);

        /* Allow tb_handle_hotplug to progress events */
        tcm->hotplug_active = true;
        return 0;
}
static int tb_suspend_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);

        tb_dbg(tb, "suspending...\n");
        tb_switch_suspend(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
        tb_dbg(tb, "suspend finished\n");

        return 0;
}
static void tb_restore_children(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_has_remote(port))
                        continue;

                if (tb_switch_lane_bonding_enable(port->remote->sw))
                        dev_warn(&sw->dev, "failed to restore lane bonding\n");

                tb_restore_children(port->remote->sw);
        }
}
static int tb_resume_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel, *n;

        tb_dbg(tb, "resuming...\n");

        /* remove any pci devices the firmware might have setup */
        tb_switch_reset(tb, 0);

        tb_switch_resume(tb->root_switch);
        tb_free_invalid_tunnels(tb);
        tb_free_unplugged_children(tb->root_switch);
        tb_restore_children(tb->root_switch);
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
                tb_tunnel_restart(tunnel);
        if (!list_empty(&tcm->tunnel_list)) {
                /*
                 * The PCIe links need some time to get going.
                 * 100ms works for me...
                 */
                tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
                msleep(100);
        }
        /* Allow tb_handle_hotplug to progress events */
        tcm->hotplug_active = true;
        tb_dbg(tb, "resume finished\n");

        return 0;
}
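/*
 * tb_free_unplugged_xdomains() - remove XDomains that went away over suspend
 *
 * Recursively removes all XDomain connections marked unplugged and
 * returns the number of removed connections so the caller knows whether
 * a rescan is needed.
 */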
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
        struct tb_port *port;
        int ret = 0;

        tb_switch_for_each_port(sw, port) {
                if (tb_is_upstream_port(port))
                        continue;
                if (port->xdomain && port->xdomain->is_unplugged) {
                        tb_xdomain_remove(port->xdomain);
                        port->xdomain = NULL;
                        ret++;
                } else if (port->remote) {
                        ret += tb_free_unplugged_xdomains(port->remote->sw);
                }
        }

        return ret;
}
static void tb_complete(struct tb *tb)
{
        /*
         * Release any unplugged XDomains and if there is a case where
         * another domain is swapped in place of an unplugged XDomain we
         * need to run another rescan.
         */
        mutex_lock(&tb->lock);
        if (tb_free_unplugged_xdomains(tb->root_switch))
                tb_scan_switch(tb->root_switch);
        mutex_unlock(&tb->lock);
}
static const struct tb_cm_ops tb_cm_ops = {
        .start = tb_start,
        .stop = tb_stop,
        .suspend_noirq = tb_suspend_noirq,
        .resume_noirq = tb_resume_noirq,
        .complete = tb_complete,
        .handle_event = tb_handle_event,
        .approve_switch = tb_tunnel_pci,
        .approve_xdomain_paths = tb_approve_xdomain_paths,
        .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
struct tb *tb_probe(struct tb_nhi *nhi)
{
        struct tb_cm *tcm;
        struct tb *tb;

        tb = tb_domain_alloc(nhi, sizeof(*tcm));
        if (!tb)
                return NULL;

        tb->security_level = TB_SECURITY_USER;
        tb->cm_ops = &tb_cm_ops;

        tcm = tb_priv(tb);
        INIT_LIST_HEAD(&tcm->tunnel_list);
        INIT_LIST_HEAD(&tcm->dp_resources);

        return tb;
}