1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - bus logic (NHI independent)
4  *
5  * Copyright (c) 2014 Andreas Noever <[email protected]>
6  * Copyright (C) 2019, Intel Corporation
7  */
8
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/platform_data/x86/apple.h>
14
15 #include "tb.h"
16 #include "tb_regs.h"
17 #include "tunnel.h"
18
19 #define TB_TIMEOUT              100     /* ms */
20
21 /*
22  * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
23  * direction. This is 40G minus the 10% guard band.
24  */
25 #define TB_ASYM_MIN             (40000 * 90 / 100)
26
27 /*
28  * Threshold bandwidth (in Mb/s) that is used to switch the links to
29  * asymmetric and back. This is selected as 45G which means when the
30  * request is higher than this, we switch the link to asymmetric, and
31  * when it is less than this we switch it back. The 45G is selected so
32  * that we still have 27G (of the total 72G) for bulk PCIe traffic when
33  * switching back to symmetric.
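 *
 * In other words, a bonded Gen 4 link has 2 * 40G * 90% = 72G usable in
 * total once the 10% guard band is subtracted, and 72G - 45G leaves the
 * 27G mentioned above for PCIe.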
34  */
35 #define TB_ASYM_THRESHOLD       45000
36
37 #define MAX_GROUPS              7       /* max Group_ID is 7 */
38
39 static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
40 module_param_named(asym_threshold, asym_threshold, uint, 0444);
41 MODULE_PARM_DESC(asym_threshold,
42                 "threshold (Mb/s) when to switch Gen 4 link symmetry. 0 disables. (default: "
43                 __MODULE_STRING(TB_ASYM_THRESHOLD) ")");
44
45 /**
46  * struct tb_cm - Simple Thunderbolt connection manager
47  * @tunnel_list: List of active tunnels
48  * @dp_resources: List of available DP resources for DP tunneling
49  * @hotplug_active: tb_handle_hotplug will stop progressing plug
50  *                  events and exit if this is not set (it needs to
51  *                  acquire the lock one more time). Used to drain wq
52  *                  after cfg has been paused.
53  * @remove_work: Work used to remove any unplugged routers after
54  *               runtime resume
55  * @groups: Bandwidth groups used in this domain.
56  */
57 struct tb_cm {
58         struct list_head tunnel_list;
59         struct list_head dp_resources;
60         bool hotplug_active;
61         struct delayed_work remove_work;
62         struct tb_bandwidth_group groups[MAX_GROUPS];
63 };
64
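/*
 * The connection manager private data is allocated immediately after
 * struct tb (see tb_priv()), so stepping back by sizeof(struct tb)
 * recovers the domain pointer.
 */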
65 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
66 {
67         return ((void *)tcm - sizeof(struct tb));
68 }
69
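/**
 * struct tb_hotplug_event - Queued hotplug event
 * @work: Work item that runs tb_handle_hotplug()
 * @tb: Domain the event belongs to
 * @route: Route string of the router that generated the event
 * @port: Port number of the adapter that generated the event
 * @unplug: %true if this is an unplug event, %false for a plug event
 */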
70 struct tb_hotplug_event {
71         struct work_struct work;
72         struct tb *tb;
73         u64 route;
74         u8 port;
75         bool unplug;
76 };
77
78 static void tb_init_bandwidth_groups(struct tb_cm *tcm)
79 {
80         int i;
81
82         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
83                 struct tb_bandwidth_group *group = &tcm->groups[i];
84
85                 group->tb = tcm_to_tb(tcm);
86                 group->index = i + 1;
87                 INIT_LIST_HEAD(&group->ports);
88         }
89 }
90
91 static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
92                                            struct tb_port *in)
93 {
94         if (!group || WARN_ON(in->group))
95                 return;
96
97         in->group = group;
98         list_add_tail(&in->group_list, &group->ports);
99
100         tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
101 }
102
103 static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
104 {
105         int i;
106
107         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
108                 struct tb_bandwidth_group *group = &tcm->groups[i];
109
110                 if (list_empty(&group->ports))
111                         return group;
112         }
113
114         return NULL;
115 }
116
117 static struct tb_bandwidth_group *
118 tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
119                           struct tb_port *out)
120 {
121         struct tb_bandwidth_group *group;
122         struct tb_tunnel *tunnel;
123
124         /*
125          * Find all DP tunnels that go through all the same USB4 links
126          * as this one. Because we always set up tunnels the same way we
127          * can just check for the routers at both ends of the tunnels
128          * and if they are the same we have a match.
129          */
130         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
131                 if (!tb_tunnel_is_dp(tunnel))
132                         continue;
133
134                 if (tunnel->src_port->sw == in->sw &&
135                     tunnel->dst_port->sw == out->sw) {
136                         group = tunnel->src_port->group;
137                         if (group) {
138                                 tb_bandwidth_group_attach_port(group, in);
139                                 return group;
140                         }
141                 }
142         }
143
144         /* Pick up next available group then */
145         group = tb_find_free_bandwidth_group(tcm);
146         if (group)
147                 tb_bandwidth_group_attach_port(group, in);
148         else
149                 tb_port_warn(in, "no available bandwidth groups\n");
150
151         return group;
152 }
153
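/*
 * If the boot firmware already enabled the bandwidth allocation mode on
 * the DP IN adapter, reuse the group ID it programmed there; otherwise
 * attach the adapter to a group the usual way.
 */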
154 static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
155                                         struct tb_port *out)
156 {
157         if (usb4_dp_port_bandwidth_mode_enabled(in)) {
158                 int index, i;
159
160                 index = usb4_dp_port_group_id(in);
161                 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
162                         if (tcm->groups[i].index == index) {
163                                 tb_bandwidth_group_attach_port(&tcm->groups[i], in);
164                                 return;
165                         }
166                 }
167         }
168
169         tb_attach_bandwidth_group(tcm, in, out);
170 }
171
172 static void tb_detach_bandwidth_group(struct tb_port *in)
173 {
174         struct tb_bandwidth_group *group = in->group;
175
176         if (group) {
177                 in->group = NULL;
178                 list_del_init(&in->group_list);
179
180                 tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
181         }
182 }
183
184 static void tb_handle_hotplug(struct work_struct *work);
185
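/*
 * Allocates a hotplug event and queues it to the domain workqueue. The
 * event is processed (and freed) asynchronously by tb_handle_hotplug().
 */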
186 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
187 {
188         struct tb_hotplug_event *ev;
189
190         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
191         if (!ev)
192                 return;
193
194         ev->tb = tb;
195         ev->route = route;
196         ev->port = port;
197         ev->unplug = unplug;
198         INIT_WORK(&ev->work, tb_handle_hotplug);
199         queue_work(tb->wq, &ev->work);
200 }
201
202 /* enumeration & hot plug handling */
203
204 static void tb_add_dp_resources(struct tb_switch *sw)
205 {
206         struct tb_cm *tcm = tb_priv(sw->tb);
207         struct tb_port *port;
208
209         tb_switch_for_each_port(sw, port) {
210                 if (!tb_port_is_dpin(port))
211                         continue;
212
213                 if (!tb_switch_query_dp_resource(sw, port))
214                         continue;
215
216                 /*
217                  * If a DP IN on a device router exists, position it at the
218                  * beginning of the DP resources list, so that it is used
219                  * before DP IN of the host router. This way external GPU(s)
220                  * will be prioritized when pairing DP IN to a DP OUT.
221                  */
222                 if (tb_route(sw))
223                         list_add(&port->list, &tcm->dp_resources);
224                 else
225                         list_add_tail(&port->list, &tcm->dp_resources);
226
227                 tb_port_dbg(port, "DP IN resource available\n");
228         }
229 }
230
231 static void tb_remove_dp_resources(struct tb_switch *sw)
232 {
233         struct tb_cm *tcm = tb_priv(sw->tb);
234         struct tb_port *port, *tmp;
235
236         /* Clear children resources first */
237         tb_switch_for_each_port(sw, port) {
238                 if (tb_port_has_remote(port))
239                         tb_remove_dp_resources(port->remote->sw);
240         }
241
242         list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
243                 if (port->sw == sw) {
244                         tb_port_dbg(port, "DP OUT resource unavailable\n");
245                         list_del_init(&port->list);
246                 }
247         }
248 }
249
250 static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
251 {
252         struct tb_cm *tcm = tb_priv(tb);
253         struct tb_port *p;
254
255         list_for_each_entry(p, &tcm->dp_resources, list) {
256                 if (p == port)
257                         return;
258         }
259
260         tb_port_dbg(port, "DP %s resource available discovered\n",
261                     tb_port_is_dpin(port) ? "IN" : "OUT");
262         list_add_tail(&port->list, &tcm->dp_resources);
263 }
264
265 static void tb_discover_dp_resources(struct tb *tb)
266 {
267         struct tb_cm *tcm = tb_priv(tb);
268         struct tb_tunnel *tunnel;
269
270         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
271                 if (tb_tunnel_is_dp(tunnel))
272                         tb_discover_dp_resource(tb, tunnel->dst_port);
273         }
274 }
275
276 /* Enables CL states up to host router */
277 static int tb_enable_clx(struct tb_switch *sw)
278 {
279         struct tb_cm *tcm = tb_priv(sw->tb);
280         unsigned int clx = TB_CL0S | TB_CL1;
281         const struct tb_tunnel *tunnel;
282         int ret;
283
284         /*
285          * Currently only enable CLx for the first link. This is enough
286          * to allow the CPU to save energy at least on Intel hardware
287          * and makes it slightly simpler to implement. We may change
288          * this in the future to cover the whole topology if it turns
289          * out to be beneficial.
290          */
291         while (sw && tb_switch_depth(sw) > 1)
292                 sw = tb_switch_parent(sw);
293
294         if (!sw)
295                 return 0;
296
297         if (tb_switch_depth(sw) != 1)
298                 return 0;
299
300         /*
301          * If we are re-enabling then check if there is an active DMA
302          * tunnel and in that case bail out.
303          */
304         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
305                 if (tb_tunnel_is_dma(tunnel)) {
306                         if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
307                                 return 0;
308                 }
309         }
310
311         /*
312          * Initially try with CL2. If that's not supported by the
313          * topology try with CL0s and CL1 and then give up.
314          */
315         ret = tb_switch_clx_enable(sw, clx | TB_CL2);
316         if (ret == -EOPNOTSUPP)
317                 ret = tb_switch_clx_enable(sw, clx);
318         return ret == -EOPNOTSUPP ? 0 : ret;
319 }
320
321 /**
322  * tb_disable_clx() - Disable CL states up to host router
323  * @sw: Router to start
324  *
325  * Disables CL states from @sw up to the host router. Returns true if
326  * any CL states were disabled. This can be used to figure out whether
327  * the link was set up by us or the boot firmware so we don't
328  * accidentally enable them if they were not enabled during discovery.
329  */
330 static bool tb_disable_clx(struct tb_switch *sw)
331 {
332         bool disabled = false;
333
334         do {
335                 int ret;
336
337                 ret = tb_switch_clx_disable(sw);
338                 if (ret > 0)
339                         disabled = true;
340                 else if (ret < 0)
341                         tb_sw_warn(sw, "failed to disable CL states\n");
342
343                 sw = tb_switch_parent(sw);
344         } while (sw);
345
346         return disabled;
347 }
348
349 static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
350 {
351         struct tb_switch *sw;
352
353         sw = tb_to_switch(dev);
354         if (!sw)
355                 return 0;
356
357         if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
358                 enum tb_switch_tmu_mode mode;
359                 int ret;
360
361                 if (tb_switch_clx_is_enabled(sw, TB_CL1))
362                         mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
363                 else
364                         mode = TB_SWITCH_TMU_MODE_HIFI_BI;
365
366                 ret = tb_switch_tmu_configure(sw, mode);
367                 if (ret)
368                         return ret;
369
370                 return tb_switch_tmu_enable(sw);
371         }
372
373         return 0;
374 }
375
376 static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
377 {
378         struct tb_switch *sw;
379
380         if (!tunnel)
381                 return;
382
383         /*
384          * Once the first DP tunnel is established we change the TMU
385          * accuracy of the first depth child routers (and the host router)
386          * to the highest. This is needed for the DP tunneling to work
387          * but also allows CL0s.
388          *
389          * If both routers are v2 then we don't need to do anything as
390          * they are using enhanced TMU mode that allows all CLx.
391          */
392         sw = tunnel->tb->root_switch;
393         device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
394 }
395
396 static int tb_enable_tmu(struct tb_switch *sw)
397 {
398         int ret;
399
400         /*
401          * If both routers at the end of the link are v2 we simply
402          * enable the enhanced uni-directional mode. That covers all
403          * the CL states. For v1 and before we need to use the normal
404          * rate to allow CL1 (when supported). Otherwise we keep the TMU
405          * running at the highest accuracy.
406          */
407         ret = tb_switch_tmu_configure(sw,
408                         TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
409         if (ret == -EOPNOTSUPP) {
410                 if (tb_switch_clx_is_enabled(sw, TB_CL1))
411                         ret = tb_switch_tmu_configure(sw,
412                                         TB_SWITCH_TMU_MODE_LOWRES);
413                 else
414                         ret = tb_switch_tmu_configure(sw,
415                                         TB_SWITCH_TMU_MODE_HIFI_BI);
416         }
417         if (ret)
418                 return ret;
419
420         /* If it is already enabled in correct mode, don't touch it */
421         if (tb_switch_tmu_is_enabled(sw))
422                 return 0;
423
424         ret = tb_switch_tmu_disable(sw);
425         if (ret)
426                 return ret;
427
428         ret = tb_switch_tmu_post_time(sw);
429         if (ret)
430                 return ret;
431
432         return tb_switch_tmu_enable(sw);
433 }
434
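/*
 * Walks the topology depth-first starting from @sw and adds any DP, PCIe
 * and USB3 tunnels discovered from the hardware state to @list.
 */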
435 static void tb_switch_discover_tunnels(struct tb_switch *sw,
436                                        struct list_head *list,
437                                        bool alloc_hopids)
438 {
439         struct tb *tb = sw->tb;
440         struct tb_port *port;
441
442         tb_switch_for_each_port(sw, port) {
443                 struct tb_tunnel *tunnel = NULL;
444
445                 switch (port->config.type) {
446                 case TB_TYPE_DP_HDMI_IN:
447                         tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
448                         tb_increase_tmu_accuracy(tunnel);
449                         break;
450
451                 case TB_TYPE_PCIE_DOWN:
452                         tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
453                         break;
454
455                 case TB_TYPE_USB3_DOWN:
456                         tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
457                         break;
458
459                 default:
460                         break;
461                 }
462
463                 if (tunnel)
464                         list_add_tail(&tunnel->list, list);
465         }
466
467         tb_switch_for_each_port(sw, port) {
468                 if (tb_port_has_remote(port)) {
469                         tb_switch_discover_tunnels(port->remote->sw, list,
470                                                    alloc_hopids);
471                 }
472         }
473 }
474
475 static void tb_discover_tunnels(struct tb *tb)
476 {
477         struct tb_cm *tcm = tb_priv(tb);
478         struct tb_tunnel *tunnel;
479
480         tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
481
482         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
483                 if (tb_tunnel_is_pci(tunnel)) {
484                         struct tb_switch *parent = tunnel->dst_port->sw;
485
486                         while (parent != tunnel->src_port->sw) {
487                                 parent->boot = true;
488                                 parent = tb_switch_parent(parent);
489                         }
490                 } else if (tb_tunnel_is_dp(tunnel)) {
491                         struct tb_port *in = tunnel->src_port;
492                         struct tb_port *out = tunnel->dst_port;
493
494                         /* Keep the domain from powering down */
495                         pm_runtime_get_sync(&in->sw->dev);
496                         pm_runtime_get_sync(&out->sw->dev);
497
498                         tb_discover_bandwidth_group(tcm, in, out);
499                 }
500         }
501 }
502
503 static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
504 {
505         if (tb_switch_is_usb4(port->sw))
506                 return usb4_port_configure_xdomain(port, xd);
507         return tb_lc_configure_xdomain(port);
508 }
509
510 static void tb_port_unconfigure_xdomain(struct tb_port *port)
511 {
512         if (tb_switch_is_usb4(port->sw))
513                 usb4_port_unconfigure_xdomain(port);
514         else
515                 tb_lc_unconfigure_xdomain(port);
516
517         tb_port_enable(port->dual_link_port);
518 }
519
520 static void tb_scan_xdomain(struct tb_port *port)
521 {
522         struct tb_switch *sw = port->sw;
523         struct tb *tb = sw->tb;
524         struct tb_xdomain *xd;
525         u64 route;
526
527         if (!tb_is_xdomain_enabled())
528                 return;
529
530         route = tb_downstream_route(port);
531         xd = tb_xdomain_find_by_route(tb, route);
532         if (xd) {
533                 tb_xdomain_put(xd);
534                 return;
535         }
536
537         xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
538                               NULL);
539         if (xd) {
540                 tb_port_at(route, sw)->xdomain = xd;
541                 tb_port_configure_xdomain(port, xd);
542                 tb_xdomain_add(xd);
543         }
544 }
545
546 /**
547  * tb_find_unused_port() - return the first inactive port on @sw
548  * @sw: Switch to find the port on
549  * @type: Port type to look for
550  */
551 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
552                                            enum tb_port_type type)
553 {
554         struct tb_port *port;
555
556         tb_switch_for_each_port(sw, port) {
557                 if (tb_is_upstream_port(port))
558                         continue;
559                 if (port->config.type != type)
560                         continue;
561                 if (!port->cap_adap)
562                         continue;
563                 if (tb_port_is_enabled(port))
564                         continue;
565                 return port;
566         }
567         return NULL;
568 }
569
570 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
571                                          const struct tb_port *port)
572 {
573         struct tb_port *down;
574
575         down = usb4_switch_map_usb3_down(sw, port);
576         if (down && !tb_usb3_port_is_enabled(down))
577                 return down;
578         return NULL;
579 }
580
581 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
582                                         struct tb_port *src_port,
583                                         struct tb_port *dst_port)
584 {
585         struct tb_cm *tcm = tb_priv(tb);
586         struct tb_tunnel *tunnel;
587
588         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
589                 if (tunnel->type == type &&
590                     ((src_port && src_port == tunnel->src_port) ||
591                      (dst_port && dst_port == tunnel->dst_port))) {
592                         return tunnel;
593                 }
594         }
595
596         return NULL;
597 }
598
599 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
600                                                    struct tb_port *src_port,
601                                                    struct tb_port *dst_port)
602 {
603         struct tb_port *port, *usb3_down;
604         struct tb_switch *sw;
605
606         /* Pick the router that is deepest in the topology */
607         if (tb_port_path_direction_downstream(src_port, dst_port))
608                 sw = dst_port->sw;
609         else
610                 sw = src_port->sw;
611
612         /* Can't be the host router */
613         if (sw == tb->root_switch)
614                 return NULL;
615
616         /* Find the downstream USB4 port that leads to this router */
617         port = tb_port_at(tb_route(sw), tb->root_switch);
618         /* Find the corresponding host router USB3 downstream port */
619         usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
620         if (!usb3_down)
621                 return NULL;
622
623         return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
624 }
625
626 /**
627  * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
628  * @tb: Domain structure
629  * @src_port: Source protocol adapter
630  * @dst_port: Destination protocol adapter
631  * @port: USB4 port the consumed bandwidth is calculated for
632  * @consumed_up: Consumed upstream bandwidth (Mb/s)
633  * @consumed_down: Consumed downstream bandwidth (Mb/s)
634  *
635  * Calculates the USB3 and PCIe bandwidth consumed at @port on the path
636  * from @src_port to @dst_port. Does not take the tunnel starting from
637  * @src_port and ending at @dst_port into account.
638  */
639 static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
640                                            struct tb_port *src_port,
641                                            struct tb_port *dst_port,
642                                            struct tb_port *port,
643                                            int *consumed_up,
644                                            int *consumed_down)
645 {
646         int pci_consumed_up, pci_consumed_down;
647         struct tb_tunnel *tunnel;
648
649         *consumed_up = *consumed_down = 0;
650
651         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
652         if (tunnel && tunnel->src_port != src_port &&
653             tunnel->dst_port != dst_port) {
654                 int ret;
655
656                 ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
657                                                    consumed_down);
658                 if (ret)
659                         return ret;
660         }
661
662         /*
663          * If there is anything reserved for PCIe bulk traffic take it
664          * into account here too.
665          */
666         if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
667                 *consumed_up += pci_consumed_up;
668                 *consumed_down += pci_consumed_down;
669         }
670
671         return 0;
672 }
673
674 /**
675  * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
676  * @tb: Domain structure
677  * @src_port: Source protocol adapter
678  * @dst_port: Destination protocol adapter
679  * @port: USB4 port the consumed bandwidth is calculated for
680  * @consumed_up: Consumed upstream bandwidth (Mb/s)
681  * @consumed_down: Consumed downstream bandwidth (Mb/s)
682  *
683  * Calculates the DP bandwidth consumed at @port on the path from @src_port
684  * to @dst_port. Does not take the tunnel starting from @src_port and
685  * ending at @dst_port into account.
686  */
687 static int tb_consumed_dp_bandwidth(struct tb *tb,
688                                     struct tb_port *src_port,
689                                     struct tb_port *dst_port,
690                                     struct tb_port *port,
691                                     int *consumed_up,
692                                     int *consumed_down)
693 {
694         struct tb_cm *tcm = tb_priv(tb);
695         struct tb_tunnel *tunnel;
696         int ret;
697
698         *consumed_up = *consumed_down = 0;
699
700         /*
701          * Find all DP tunnels that cross the port and reduce
702          * their consumed bandwidth from the available.
703          */
704         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
705                 int dp_consumed_up, dp_consumed_down;
706
707                 if (tb_tunnel_is_invalid(tunnel))
708                         continue;
709
710                 if (!tb_tunnel_is_dp(tunnel))
711                         continue;
712
713                 if (!tb_tunnel_port_on_path(tunnel, port))
714                         continue;
715
716                 /*
717                  * Ignore the DP tunnel between src_port and dst_port
718                  * because it is the same tunnel and we may be
719                  * re-calculating estimated bandwidth.
720                  */
721                 if (tunnel->src_port == src_port &&
722                     tunnel->dst_port == dst_port)
723                         continue;
724
725                 ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
726                                                    &dp_consumed_down);
727                 if (ret)
728                         return ret;
729
730                 *consumed_up += dp_consumed_up;
731                 *consumed_down += dp_consumed_down;
732         }
733
734         return 0;
735 }
736
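/*
 * Returns true if @port supports the asymmetric link width that would be
 * needed to carry the extra bandwidth in the direction from @src_port
 * towards @dst_port.
 */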
737 static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
738                               struct tb_port *port)
739 {
740         bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
741         enum tb_link_width width;
742
743         if (tb_is_upstream_port(port))
744                 width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
745         else
746                 width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;
747
748         return tb_port_width_supported(port, width);
749 }
750
751 /**
752  * tb_maximum_bandwidth() - Maximum bandwidth over a single link
753  * @tb: Domain structure
754  * @src_port: Source protocol adapter
755  * @dst_port: Destination protocol adapter
756  * @port: USB4 port the total bandwidth is calculated for
757  * @max_up: Maximum upstream bandwidth (Mb/s)
758  * @max_down: Maximum downstream bandwidth (Mb/s)
759  * @include_asym: Include bandwidth if the link is switched from
760  *                symmetric to asymmetric
761  *
762  * Returns maximum possible bandwidth in @max_up and @max_down over a
763  * single link at @port. If @include_asym is set then includes the
764  * additional bandwidth if the links are transitioned into asymmetric in
765  * the direction from @src_port to @dst_port.
766  */
767 static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
768                                 struct tb_port *dst_port, struct tb_port *port,
769                                 int *max_up, int *max_down, bool include_asym)
770 {
771         bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
772         int link_speed, link_width, up_bw, down_bw;
773
774         /*
775          * Asymmetric bandwidth can be included only if it is actually
776          * supported by the lane adapter.
777          */
778         if (!tb_asym_supported(src_port, dst_port, port))
779                 include_asym = false;
780
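        /*
         * Per-lane speed is in Gb/s. An asymmetric Gen 4 link runs three
         * lanes in one direction and one in the other, hence the 3x/1x
         * multipliers below (the results are in Mb/s).
         */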
781         if (tb_is_upstream_port(port)) {
782                 link_speed = port->sw->link_speed;
783                 /*
784                  * sw->link_width is from upstream perspective so we use
785                  * the opposite for downstream of the host router.
786                  */
787                 if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
788                         up_bw = link_speed * 3 * 1000;
789                         down_bw = link_speed * 1 * 1000;
790                 } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
791                         up_bw = link_speed * 1 * 1000;
792                         down_bw = link_speed * 3 * 1000;
793                 } else if (include_asym) {
794                         /*
795                          * The link is symmetric at the moment but we
796                          * can switch it to asymmetric as needed. Report
797                          * this bandwidth as available (even though it
798                          * is not yet enabled).
799                          */
800                         if (downstream) {
801                                 up_bw = link_speed * 1 * 1000;
802                                 down_bw = link_speed * 3 * 1000;
803                         } else {
804                                 up_bw = link_speed * 3 * 1000;
805                                 down_bw = link_speed * 1 * 1000;
806                         }
807                 } else {
808                         up_bw = link_speed * port->sw->link_width * 1000;
809                         down_bw = up_bw;
810                 }
811         } else {
812                 link_speed = tb_port_get_link_speed(port);
813                 if (link_speed < 0)
814                         return link_speed;
815
816                 link_width = tb_port_get_link_width(port);
817                 if (link_width < 0)
818                         return link_width;
819
820                 if (link_width == TB_LINK_WIDTH_ASYM_TX) {
821                         up_bw = link_speed * 1 * 1000;
822                         down_bw = link_speed * 3 * 1000;
823                 } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
824                         up_bw = link_speed * 3 * 1000;
825                         down_bw = link_speed * 1 * 1000;
826                 } else if (include_asym) {
827                         /*
828                          * The link is symmetric at the moment but we
829                          * can switch it to asymmetric as needed. Report
830                          * this bandwidth as available (even though it
831                          * is not yet enabled).
832                          */
833                         if (downstream) {
834                                 up_bw = link_speed * 1 * 1000;
835                                 down_bw = link_speed * 3 * 1000;
836                         } else {
837                                 up_bw = link_speed * 3 * 1000;
838                                 down_bw = link_speed * 1 * 1000;
839                         }
840                 } else {
841                         up_bw = link_speed * link_width * 1000;
842                         down_bw = up_bw;
843                 }
844         }
845
846         /* Leave 10% guard band */
847         *max_up = up_bw - up_bw / 10;
848         *max_down = down_bw - down_bw / 10;
849
850         tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
851         return 0;
852 }
853
854 /**
855  * tb_available_bandwidth() - Available bandwidth for tunneling
856  * @tb: Domain structure
857  * @src_port: Source protocol adapter
858  * @dst_port: Destination protocol adapter
859  * @available_up: Available bandwidth upstream (Mb/s)
860  * @available_down: Available bandwidth downstream (Mb/s)
861  * @include_asym: Include bandwidth if the link is switched from
862  *                symmetric to asymmetric
863  *
864  * Calculates maximum available bandwidth for protocol tunneling between
865  * @src_port and @dst_port at the moment. This is the minimum of the
866  * maximum link bandwidth across all the links, each reduced by the
867  * bandwidth currently consumed on that link.
868  *
869  * If @include_asym is true then includes also bandwidth that can be
870  * added when the links are transitioned into asymmetric (but does not
871  * transition the links).
872  */
873 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
874                                  struct tb_port *dst_port, int *available_up,
875                                  int *available_down, bool include_asym)
876 {
877         struct tb_port *port;
878         int ret;
879
880         /* Maximum possible bandwidth of an asymmetric Gen 4 link is 120 Gb/s */
881         *available_up = *available_down = 120000;
882
883         /* Find the minimum available bandwidth over all links */
884         tb_for_each_port_on_path(src_port, dst_port, port) {
885                 int max_up, max_down, consumed_up, consumed_down;
886
887                 if (!tb_port_is_null(port))
888                         continue;
889
890                 ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
891                                            &max_up, &max_down, include_asym);
892                 if (ret)
893                         return ret;
894
895                 ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
896                                                       port, &consumed_up,
897                                                       &consumed_down);
898                 if (ret)
899                         return ret;
900                 max_up -= consumed_up;
901                 max_down -= consumed_down;
902
903                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
904                                                &consumed_up, &consumed_down);
905                 if (ret)
906                         return ret;
907                 max_up -= consumed_up;
908                 max_down -= consumed_down;
909
910                 if (max_up < *available_up)
911                         *available_up = max_up;
912                 if (max_down < *available_down)
913                         *available_down = max_down;
914         }
915
916         if (*available_up < 0)
917                 *available_up = 0;
918         if (*available_down < 0)
919                 *available_down = 0;
920
921         return 0;
922 }
923
924 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
925                                             struct tb_port *src_port,
926                                             struct tb_port *dst_port)
927 {
928         struct tb_tunnel *tunnel;
929
930         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
931         return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
932 }
933
934 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
935                                       struct tb_port *dst_port)
936 {
937         int ret, available_up, available_down;
938         struct tb_tunnel *tunnel;
939
940         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
941         if (!tunnel)
942                 return;
943
944         tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n");
945
946         /*
947          * Calculate available bandwidth for the first hop USB3 tunnel.
948          * That determines the whole USB3 bandwidth for this branch.
949          */
950         ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
951                                      &available_up, &available_down, false);
952         if (ret) {
953                 tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n");
954                 return;
955         }
956
957         tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up,
958                       available_down);
959
960         tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
961 }
962
963 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
964 {
965         struct tb_switch *parent = tb_switch_parent(sw);
966         int ret, available_up, available_down;
967         struct tb_port *up, *down, *port;
968         struct tb_cm *tcm = tb_priv(tb);
969         struct tb_tunnel *tunnel;
970
971         if (!tb_acpi_may_tunnel_usb3()) {
972                 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
973                 return 0;
974         }
975
976         up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
977         if (!up)
978                 return 0;
979
980         if (!sw->link_usb4)
981                 return 0;
982
983         /*
984          * Look up available down port. Since we are chaining it should
985          * be found right above this switch.
986          */
987         port = tb_switch_downstream_port(sw);
988         down = tb_find_usb3_down(parent, port);
989         if (!down)
990                 return 0;
991
992         if (tb_route(parent)) {
993                 struct tb_port *parent_up;
994                 /*
995                  * Check first that the parent switch has its upstream USB3
996                  * port enabled. Otherwise the chain is not complete and
997                  * there is no point setting up a new tunnel.
998                  */
999                 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
1000                 if (!parent_up || !tb_port_is_enabled(parent_up))
1001                         return 0;
1002
1003                 /* Make all unused bandwidth available for the new tunnel */
1004                 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
1005                 if (ret)
1006                         return ret;
1007         }
1008
1009         ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
1010                                      false);
1011         if (ret)
1012                 goto err_reclaim;
1013
1014         tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
1015                     available_up, available_down);
1016
1017         tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
1018                                       available_down);
1019         if (!tunnel) {
1020                 ret = -ENOMEM;
1021                 goto err_reclaim;
1022         }
1023
1024         if (tb_tunnel_activate(tunnel)) {
1025                 tb_port_info(up,
1026                              "USB3 tunnel activation failed, aborting\n");
1027                 ret = -EIO;
1028                 goto err_free;
1029         }
1030
1031         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1032         if (tb_route(parent))
1033                 tb_reclaim_usb3_bandwidth(tb, down, up);
1034
1035         return 0;
1036
1037 err_free:
1038         tb_tunnel_free(tunnel);
1039 err_reclaim:
1040         if (tb_route(parent))
1041                 tb_reclaim_usb3_bandwidth(tb, down, up);
1042
1043         return ret;
1044 }
1045
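/*
 * Creates a USB3 tunnel for @sw and then recurses into all child routers
 * so that the whole tree below @sw gets tunneled.
 */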
1046 static int tb_create_usb3_tunnels(struct tb_switch *sw)
1047 {
1048         struct tb_port *port;
1049         int ret;
1050
1051         if (!tb_acpi_may_tunnel_usb3())
1052                 return 0;
1053
1054         if (tb_route(sw)) {
1055                 ret = tb_tunnel_usb3(sw->tb, sw);
1056                 if (ret)
1057                         return ret;
1058         }
1059
1060         tb_switch_for_each_port(sw, port) {
1061                 if (!tb_port_has_remote(port))
1062                         continue;
1063                 ret = tb_create_usb3_tunnels(port->remote->sw);
1064                 if (ret)
1065                         return ret;
1066         }
1067
1068         return 0;
1069 }
1070
1071 /**
1072  * tb_configure_asym() - Transition links to asymmetric if needed
1073  * @tb: Domain structure
1074  * @src_port: Source adapter to start the transition
1075  * @dst_port: Destination adapter
1076  * @requested_up: Additional bandwidth (Mb/s) required upstream
1077  * @requested_down: Additional bandwidth (Mb/s) required downstream
1078  *
1079  * Transition links between @src_port and @dst_port into asymmetric, with
1080  * three lanes in the direction from @src_port towards @dst_port and one lane
1081  * in the opposite direction, if the bandwidth requirements
1082  * (requested + currently consumed) on that link exceed @asym_threshold.
1083  *
1084  * Must be called with available >= requested over all links.
1085  */
1086 static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
1087                              struct tb_port *dst_port, int requested_up,
1088                              int requested_down)
1089 {
1090         struct tb_switch *sw;
1091         bool clx, downstream;
1092         struct tb_port *up;
1093         int ret = 0;
1094
1095         if (!asym_threshold)
1096                 return 0;
1097
1098         /* Disable CL states before doing any transitions */
1099         downstream = tb_port_path_direction_downstream(src_port, dst_port);
1100         /* Pick up router deepest in the hierarchy */
1101         if (downstream)
1102                 sw = dst_port->sw;
1103         else
1104                 sw = src_port->sw;
1105
1106         clx = tb_disable_clx(sw);
1107
1108         tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
1109                 int consumed_up, consumed_down;
1110                 enum tb_link_width width;
1111
1112                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1113                                                &consumed_up, &consumed_down);
1114                 if (ret)
1115                         break;
1116
1117                 if (downstream) {
1118                         /*
1119                          * Downstream, so make sure the upstream direction stays
1120                          * within 36G (40G minus the 10% guard band) and that
1121                          * consumed + requested exceeds the threshold.
1122                          */
1123                         if (consumed_up + requested_up >= TB_ASYM_MIN) {
1124                                 ret = -ENOBUFS;
1125                                 break;
1126                         }
1127                         /* Does consumed + requested exceed the threshold */
1128                         if (consumed_down + requested_down < asym_threshold)
1129                                 continue;
1130
1131                         width = TB_LINK_WIDTH_ASYM_RX;
1132                 } else {
1133                         /* Upstream, the opposite of above */
1134                         if (consumed_down + requested_down >= TB_ASYM_MIN) {
1135                                 ret = -ENOBUFS;
1136                                 break;
1137                         }
1138                         if (consumed_up + requested_up < asym_threshold)
1139                                 continue;
1140
1141                         width = TB_LINK_WIDTH_ASYM_TX;
1142                 }
1143
1144                 if (up->sw->link_width == width)
1145                         continue;
1146
1147                 if (!tb_port_width_supported(up, width))
1148                         continue;
1149
1150                 tb_sw_dbg(up->sw, "configuring asymmetric link\n");
1151
1152                 /*
1153                  * Here requested + consumed > threshold so we need to
1154                  * transition the link into asymmetric now.
1155                  */
1156                 ret = tb_switch_set_link_width(up->sw, width);
1157                 if (ret) {
1158                         tb_sw_warn(up->sw, "failed to set link width\n");
1159                         break;
1160                 }
1161         }
1162
1163         /* Re-enable CL states if they were previously enabled */
1164         if (clx)
1165                 tb_enable_clx(sw);
1166
1167         return ret;
1168 }
1169
1170 /**
1171  * tb_configure_sym() - Transition links to symmetric if possible
1172  * @tb: Domain structure
1173  * @src_port: Source adapter to start the transition
1174  * @dst_port: Destination adapter
1175  * @requested_up: New lower bandwidth request upstream (Mb/s)
1176  * @requested_down: New lower bandwidth request downstream (Mb/s)
1177  *
1178  * Goes over each link from @src_port to @dst_port and tries to
1179  * transition the link to symmetric if the currently consumed bandwidth
1180  * allows.
1181  */
1182 static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
1183                             struct tb_port *dst_port, int requested_up,
1184                             int requested_down)
1185 {
1186         struct tb_switch *sw;
1187         bool clx, downstream;
1188         struct tb_port *up;
1189         int ret = 0;
1190
1191         if (!asym_threshold)
1192                 return 0;
1193
1194         /* Disable CL states before doing any transitions */
1195         downstream = tb_port_path_direction_downstream(src_port, dst_port);
1196         /* Pick up router deepest in the hierarchy */
1197         if (downstream)
1198                 sw = dst_port->sw;
1199         else
1200                 sw = src_port->sw;
1201
1202         clx = tb_disable_clx(sw);
1203
1204         tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
1205                 int consumed_up, consumed_down;
1206
1207                 /* Already symmetric */
1208                 if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
1209                         continue;
1210                 /* Unplugged, no need to switch */
1211                 if (up->sw->is_unplugged)
1212                         continue;
1213
1214                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1215                                                &consumed_up, &consumed_down);
1216                 if (ret)
1217                         break;
1218
1219                 if (downstream) {
1220                         /*
1221                          * Downstream, so we want consumed_down < threshold.
1222                          * Upstream traffic should already be below 36G (40G
1223                          * minus the 10% guard band) because the link was
1224                          * configured asymmetric earlier.
1225                          */
1226                         if (consumed_down + requested_down >= asym_threshold)
1227                                 continue;
1228                 } else {
1229                         if (consumed_up + requested_up >= asym_threshold)
1230                                 continue;
1231                 }
1232
1233                 if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
1234                         continue;
1235
1236                 tb_sw_dbg(up->sw, "configuring symmetric link\n");
1237
1238                 ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
1239                 if (ret) {
1240                         tb_sw_warn(up->sw, "failed to set link width\n");
1241                         break;
1242                 }
1243         }
1244
1245         /* Re-enable CL states if they were previously enabled */
1246         if (clx)
1247                 tb_enable_clx(sw);
1248
1249         return ret;
1250 }
1251
1252 static void tb_configure_link(struct tb_port *down, struct tb_port *up,
1253                               struct tb_switch *sw)
1254 {
1255         struct tb *tb = sw->tb;
1256
1257         /* Link the routers using both links if available */
1258         down->remote = up;
1259         up->remote = down;
1260         if (down->dual_link_port && up->dual_link_port) {
1261                 down->dual_link_port->remote = up->dual_link_port;
1262                 up->dual_link_port->remote = down->dual_link_port;
1263         }
1264
1265         /*
1266          * Enable lane bonding if the link is currently two single lane
1267          * links.
1268          */
1269         if (sw->link_width < TB_LINK_WIDTH_DUAL)
1270                 tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
1271
1272         /*
1273          * If a device router connected deeper in the hierarchy comes up
1274          * with a symmetric link, transition the links above it into
1275          * symmetric as well if bandwidth allows.
1276          */
1277         if (tb_switch_depth(sw) > 1 &&
1278             tb_port_get_link_generation(up) >= 4 &&
1279             up->sw->link_width == TB_LINK_WIDTH_DUAL) {
1280                 struct tb_port *host_port;
1281
1282                 host_port = tb_port_at(tb_route(sw), tb->root_switch);
1283                 tb_configure_sym(tb, host_port, up, 0, 0);
1284         }
1285
1286         /* Set the link configured */
1287         tb_switch_configure_link(sw);
1288 }
1289
1290 static void tb_scan_port(struct tb_port *port);
1291
1292 /*
1293  * tb_scan_switch() - scan for and initialize downstream switches
1294  */
1295 static void tb_scan_switch(struct tb_switch *sw)
1296 {
1297         struct tb_port *port;
1298
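        /* Keep the router runtime resumed for the duration of the scan */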
1299         pm_runtime_get_sync(&sw->dev);
1300
1301         tb_switch_for_each_port(sw, port)
1302                 tb_scan_port(port);
1303
1304         pm_runtime_mark_last_busy(&sw->dev);
1305         pm_runtime_put_autosuspend(&sw->dev);
1306 }
1307
1308 /*
1309  * tb_scan_port() - check for and initialize switches below port
1310  */
1311 static void tb_scan_port(struct tb_port *port)
1312 {
1313         struct tb_cm *tcm = tb_priv(port->sw->tb);
1314         struct tb_port *upstream_port;
1315         bool discovery = false;
1316         struct tb_switch *sw;
1317
1318         if (tb_is_upstream_port(port))
1319                 return;
1320
1321         if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
1322             !tb_dp_port_is_enabled(port)) {
1323                 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
1324                 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
1325                                  false);
1326                 return;
1327         }
1328
1329         if (port->config.type != TB_TYPE_PORT)
1330                 return;
1331         if (port->dual_link_port && port->link_nr)
1332                 return; /*
1333                          * Downstream switch is reachable through two ports.
1334                          * Only scan on the primary port (link_nr == 0).
1335                          */
1336
1337         if (port->usb4)
1338                 pm_runtime_get_sync(&port->usb4->dev);
1339
1340         if (tb_wait_for_port(port, false) <= 0)
1341                 goto out_rpm_put;
1342         if (port->remote) {
1343                 tb_port_dbg(port, "port already has a remote\n");
1344                 goto out_rpm_put;
1345         }
1346
1347         tb_retimer_scan(port, true);
1348
1349         sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
1350                              tb_downstream_route(port));
1351         if (IS_ERR(sw)) {
1352                 /*
1353                  * If there is an error accessing the connected switch
1354                  * it may be connected to another domain. Also we allow
1355                  * the other domain to be connected to a max depth switch.
1356                  */
1357                 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
1358                         tb_scan_xdomain(port);
1359                 goto out_rpm_put;
1360         }
1361
1362         if (tb_switch_configure(sw)) {
1363                 tb_switch_put(sw);
1364                 goto out_rpm_put;
1365         }
1366
1367         /*
1368          * If there was previously another domain connected remove it
1369          * first.
1370          */
1371         if (port->xdomain) {
1372                 tb_xdomain_remove(port->xdomain);
1373                 tb_port_unconfigure_xdomain(port);
1374                 port->xdomain = NULL;
1375         }
1376
1377         /*
1378          * Do not send uevents until we have discovered all existing
1379          * tunnels and know which switches were authorized already by
1380          * the boot firmware.
1381          */
1382         if (!tcm->hotplug_active) {
1383                 dev_set_uevent_suppress(&sw->dev, true);
1384                 discovery = true;
1385         }
1386
1387         /*
1388          * At the moment we support runtime PM only for Thunderbolt 2
1389          * and beyond (devices with a link controller).
1390          */
1391         sw->rpm = sw->generation > 1;
1392
1393         if (tb_switch_add(sw)) {
1394                 tb_switch_put(sw);
1395                 goto out_rpm_put;
1396         }
1397
1398         upstream_port = tb_upstream_port(sw);
1399         tb_configure_link(port, upstream_port, sw);
1400
1401         /*
1402          * CL0s and CL1 are enabled and supported together.
1403          * Silently ignore CLx enabling in case CLx is not supported.
1404          */
1405         if (discovery)
1406                 tb_sw_dbg(sw, "discovery, not touching CL states\n");
1407         else if (tb_enable_clx(sw))
1408                 tb_sw_warn(sw, "failed to enable CL states\n");
1409
1410         if (tb_enable_tmu(sw))
1411                 tb_sw_warn(sw, "failed to enable TMU\n");
1412
1413         /*
1414          * Configuration valid needs to be set after the TMU has been
1415          * enabled for the upstream port of the router so we do it here.
1416          */
1417         tb_switch_configuration_valid(sw);
1418
1419         /* Scan upstream retimers */
1420         tb_retimer_scan(upstream_port, true);
1421
1422         /*
1423          * Create USB 3.x tunnels only when the switch is plugged to the
1424          * domain. This is because we scan the domain also during discovery
1425          * and want to discover existing USB 3.x tunnels before we create
1426          * any new.
1427          */
1428         if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
1429                 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
1430
1431         tb_add_dp_resources(sw);
1432         tb_scan_switch(sw);
1433
1434 out_rpm_put:
1435         if (port->usb4) {
1436                 pm_runtime_mark_last_busy(&port->usb4->dev);
1437                 pm_runtime_put_autosuspend(&port->usb4->dev);
1438         }
1439 }
1440
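/*
 * Deactivates the tunnel, drops it from the tunnel list and releases the
 * resources (DP IN, reclaimed bandwidth, runtime PM references) that were
 * associated with it.
 */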
1441 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
1442 {
1443         struct tb_port *src_port, *dst_port;
1444         struct tb *tb;
1445
1446         if (!tunnel)
1447                 return;
1448
1449         tb_tunnel_deactivate(tunnel);
1450         list_del(&tunnel->list);
1451
1452         tb = tunnel->tb;
1453         src_port = tunnel->src_port;
1454         dst_port = tunnel->dst_port;
1455
1456         switch (tunnel->type) {
1457         case TB_TUNNEL_DP:
1458                 tb_detach_bandwidth_group(src_port);
1459                 /*
1460                  * In case of DP tunnel make sure the DP IN resource is
1461                  * deallocated properly.
1462                  */
1463                 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
1464                 /*
1465                  * If bandwidth on a link is < asym_threshold
1466                  * transition the link to symmetric.
1467                  */
1468                 tb_configure_sym(tb, src_port, dst_port, 0, 0);
1469                 /* Now we can allow the domain to runtime suspend again */
1470                 pm_runtime_mark_last_busy(&dst_port->sw->dev);
1471                 pm_runtime_put_autosuspend(&dst_port->sw->dev);
1472                 pm_runtime_mark_last_busy(&src_port->sw->dev);
1473                 pm_runtime_put_autosuspend(&src_port->sw->dev);
1474                 fallthrough;
1475
1476         case TB_TUNNEL_USB3:
1477                 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
1478                 break;
1479
1480         default:
1481                 /*
1482                  * PCIe and DMA tunnels do not consume guaranteed
1483                  * bandwidth.
1484                  */
1485                 break;
1486         }
1487
1488         tb_tunnel_free(tunnel);
1489 }
1490
1491 /*
1492  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
1493  */
1494 static void tb_free_invalid_tunnels(struct tb *tb)
1495 {
1496         struct tb_cm *tcm = tb_priv(tb);
1497         struct tb_tunnel *tunnel;
1498         struct tb_tunnel *n;
1499
1500         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1501                 if (tb_tunnel_is_invalid(tunnel))
1502                         tb_deactivate_and_free_tunnel(tunnel);
1503         }
1504 }
1505
1506 /*
1507  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
1508  */
1509 static void tb_free_unplugged_children(struct tb_switch *sw)
1510 {
1511         struct tb_port *port;
1512
1513         tb_switch_for_each_port(sw, port) {
1514                 if (!tb_port_has_remote(port))
1515                         continue;
1516
1517                 if (port->remote->sw->is_unplugged) {
1518                         tb_retimer_remove_all(port);
1519                         tb_remove_dp_resources(port->remote->sw);
1520                         tb_switch_unconfigure_link(port->remote->sw);
1521                         tb_switch_set_link_width(port->remote->sw,
1522                                                  TB_LINK_WIDTH_SINGLE);
1523                         tb_switch_remove(port->remote->sw);
1524                         port->remote = NULL;
1525                         if (port->dual_link_port)
1526                                 port->dual_link_port->remote = NULL;
1527                 } else {
1528                         tb_free_unplugged_children(port->remote->sw);
1529                 }
1530         }
1531 }
1532
1533 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
1534                                          const struct tb_port *port)
1535 {
1536         struct tb_port *down = NULL;
1537
1538         /*
1539          * To keep plugged-in devices consistently in the same PCIe
1540          * hierarchy, map switch downstream PCIe ports here.
1541          */
1542         if (tb_switch_is_usb4(sw)) {
1543                 down = usb4_switch_map_pcie_down(sw, port);
1544         } else if (!tb_route(sw)) {
1545                 int phy_port = tb_phy_port_from_link(port->port);
1546                 int index;
1547
1548                 /*
1549                  * Hard-coded Thunderbolt port to PCIe down port mapping
1550                  * per controller.
1551                  */
1552                 if (tb_switch_is_cactus_ridge(sw) ||
1553                     tb_switch_is_alpine_ridge(sw))
1554                         index = !phy_port ? 6 : 7;
1555                 else if (tb_switch_is_falcon_ridge(sw))
1556                         index = !phy_port ? 6 : 8;
1557                 else if (tb_switch_is_titan_ridge(sw))
1558                         index = !phy_port ? 8 : 9;
1559                 else
1560                         goto out;
1561
1562                 /* Validate the hard-coding */
1563                 if (WARN_ON(index > sw->config.max_port_number))
1564                         goto out;
1565
1566                 down = &sw->ports[index];
1567         }
1568
1569         if (down) {
1570                 if (WARN_ON(!tb_port_is_pcie_down(down)))
1571                         goto out;
1572                 if (tb_pci_port_is_enabled(down))
1573                         goto out;
1574
1575                 return down;
1576         }
1577
1578 out:
1579         return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
1580 }
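
/*
 * Illustrative sketch, not part of the driver: the hard-coded mapping in
 * tb_find_pcie_down() above restated as a table indexed by PHY port.
 * Controllers that are not listed fall back to tb_find_unused_port().
 */
static const int tb_example_pcie_down_index[][2] __maybe_unused = {
        { 6, 7 },       /* Cactus Ridge, Alpine Ridge */
        { 6, 8 },       /* Falcon Ridge */
        { 8, 9 },       /* Titan Ridge */
};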
1581
1582 static void
1583 tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
1584 {
1585         struct tb_tunnel *first_tunnel;
1586         struct tb *tb = group->tb;
1587         struct tb_port *in;
1588         int ret;
1589
1590         tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
1591                group->index);
1592
1593         first_tunnel = NULL;
1594         list_for_each_entry(in, &group->ports, group_list) {
1595                 int estimated_bw, estimated_up, estimated_down;
1596                 struct tb_tunnel *tunnel;
1597                 struct tb_port *out;
1598
1599                 if (!usb4_dp_port_bandwidth_mode_enabled(in))
1600                         continue;
1601
1602                 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1603                 if (WARN_ON(!tunnel))
1604                         break;
1605
1606                 if (!first_tunnel) {
1607                         /*
1608                          * Since USB3 bandwidth is shared by all DP
1609                          * tunnels under the host router USB4 port, even
1610                          * if they do not begin from the host router, we
1611                          * can release USB3 bandwidth just once and not
1612                          * for each tunnel separately.
1613                          */
1614                         first_tunnel = tunnel;
1615                         ret = tb_release_unused_usb3_bandwidth(tb,
1616                                 first_tunnel->src_port, first_tunnel->dst_port);
1617                         if (ret) {
1618                                 tb_tunnel_warn(tunnel,
1619                                         "failed to release unused bandwidth\n");
1620                                 break;
1621                         }
1622                 }
1623
1624                 out = tunnel->dst_port;
1625                 ret = tb_available_bandwidth(tb, in, out, &estimated_up,
1626                                              &estimated_down, true);
1627                 if (ret) {
1628                         tb_tunnel_warn(tunnel,
1629                                 "failed to re-calculate estimated bandwidth\n");
1630                         break;
1631                 }
1632
1633                 /*
1634                  * Estimated bandwidth includes:
1635                  *  - already allocated bandwidth for the DP tunnel
1636                  *  - available bandwidth along the path
1637                  *  - bandwidth allocated for USB 3.x but not used.
1638                  */
1639                 tb_tunnel_dbg(tunnel,
1640                               "re-calculated estimated bandwidth %u/%u Mb/s\n",
1641                               estimated_up, estimated_down);
1642
1643                 if (tb_port_path_direction_downstream(in, out))
1644                         estimated_bw = estimated_down;
1645                 else
1646                         estimated_bw = estimated_up;
1647
1648                 if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
1649                         tb_tunnel_warn(tunnel,
1650                                        "failed to update estimated bandwidth\n");
1651         }
1652
1653         if (first_tunnel)
1654                 tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
1655                                           first_tunnel->dst_port);
1656
1657         tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
1658 }
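
/*
 * Illustrative sketch with made-up numbers, not part of the driver: what
 * the estimate written back via usb4_dp_port_set_estimated_bandwidth()
 * above conceptually consists of, per the comment inside the loop.
 */
static int __maybe_unused tb_example_estimated_bw(void)
{
        int allocated_dp = 8000;        /* already allocated to this DP tunnel */
        int free_on_path = 10000;       /* still available along the path */
        int unused_usb3 = 5000;         /* reserved for USB 3.x but not used */

        /* Hypothetical estimate that would be reported to the DP IN adapter */
        return allocated_dp + free_on_path + unused_usb3;
}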
1659
1660 static void tb_recalc_estimated_bandwidth(struct tb *tb)
1661 {
1662         struct tb_cm *tcm = tb_priv(tb);
1663         int i;
1664
1665         tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
1666
1667         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1668                 struct tb_bandwidth_group *group = &tcm->groups[i];
1669
1670                 if (!list_empty(&group->ports))
1671                         tb_recalc_estimated_bandwidth_for_group(group);
1672         }
1673
1674         tb_dbg(tb, "bandwidth re-calculation done\n");
1675 }
1676
1677 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1678 {
1679         struct tb_port *host_port, *port;
1680         struct tb_cm *tcm = tb_priv(tb);
1681
1682         host_port = tb_route(in->sw) ?
1683                 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1684
1685         list_for_each_entry(port, &tcm->dp_resources, list) {
1686                 if (!tb_port_is_dpout(port))
1687                         continue;
1688
1689                 if (tb_port_is_enabled(port)) {
1690                         tb_port_dbg(port, "DP OUT in use\n");
1691                         continue;
1692                 }
1693
1694                 tb_port_dbg(port, "DP OUT available\n");
1695
1696                 /*
1697                  * Keep the DP tunnel under the topology starting from
1698                  * the same host router downstream port.
1699                  */
1700                 if (host_port && tb_route(port->sw)) {
1701                         struct tb_port *p;
1702
1703                         p = tb_port_at(tb_route(port->sw), tb->root_switch);
1704                         if (p != host_port)
1705                                 continue;
1706                 }
1707
1708                 return port;
1709         }
1710
1711         return NULL;
1712 }
1713
1714 static bool tb_tunnel_one_dp(struct tb *tb)
1715 {
1716         int available_up, available_down, ret, link_nr;
1717         struct tb_cm *tcm = tb_priv(tb);
1718         struct tb_port *port, *in, *out;
1719         int consumed_up, consumed_down;
1720         struct tb_tunnel *tunnel;
1721
1722         /*
1723          * Find a pair of inactive DP IN and DP OUT adapters and then
1724          * establish a DP tunnel between them.
1725          */
1726         tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
1727
1728         in = NULL;
1729         out = NULL;
1730         list_for_each_entry(port, &tcm->dp_resources, list) {
1731                 if (!tb_port_is_dpin(port))
1732                         continue;
1733
1734                 if (tb_port_is_enabled(port)) {
1735                         tb_port_dbg(port, "DP IN in use\n");
1736                         continue;
1737                 }
1738
1739                 in = port;
1740                 tb_port_dbg(in, "DP IN available\n");
1741
1742                 out = tb_find_dp_out(tb, port);
1743                 if (out)
1744                         break;
1745         }
1746
1747         if (!in) {
1748                 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
1749                 return false;
1750         }
1751         if (!out) {
1752                 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
1753                 return false;
1754         }
1755
1756         /*
1757          * This is only applicable to links that are not bonded (so
1758          * when Thunderbolt 1 hardware is involved somewhere in the
1759          * topology). For these try to share the DP bandwidth between
1760          * the two lanes.
1761          */
1762         link_nr = 1;
1763         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1764                 if (tb_tunnel_is_dp(tunnel)) {
1765                         link_nr = 0;
1766                         break;
1767                 }
1768         }
1769
1770         /*
1771          * The DP stream needs the domain to be active so runtime
1772          * resume both ends of the tunnel.
1773          *
1774          * This should bring the routers in the middle active as well
1775          * and keep the domain from runtime suspending while the DP
1776          * tunnel is active.
1777          */
1778         pm_runtime_get_sync(&in->sw->dev);
1779         pm_runtime_get_sync(&out->sw->dev);
1780
1781         if (tb_switch_alloc_dp_resource(in->sw, in)) {
1782                 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
1783                 goto err_rpm_put;
1784         }
1785
1786         if (!tb_attach_bandwidth_group(tcm, in, out))
1787                 goto err_dealloc_dp;
1788
1789         /* Make all unused USB3 bandwidth available for the new DP tunnel */
1790         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1791         if (ret) {
1792                 tb_warn(tb, "failed to release unused bandwidth\n");
1793                 goto err_detach_group;
1794         }
1795
1796         ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
1797                                      true);
1798         if (ret)
1799                 goto err_reclaim_usb;
1800
1801         tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
1802                available_up, available_down);
1803
1804         tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
1805                                     available_down);
1806         if (!tunnel) {
1807                 tb_port_dbg(out, "could not allocate DP tunnel\n");
1808                 goto err_reclaim_usb;
1809         }
1810
1811         if (tb_tunnel_activate(tunnel)) {
1812                 tb_port_info(out, "DP tunnel activation failed, aborting\n");
1813                 goto err_free;
1814         }
1815
1816         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1817         tb_reclaim_usb3_bandwidth(tb, in, out);
1818
1819         /*
1820          * Transition the links to asymmetric if the consumption exceeds
1821          * the threshold.
1822          */
1823         if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
1824                 tb_configure_asym(tb, in, out, consumed_up, consumed_down);
1825
1826         /* Update the domain with the new bandwidth estimation */
1827         tb_recalc_estimated_bandwidth(tb);
1828
1829         /*
1830          * When a DP tunnel exists, change the TMU mode of the host
1831          * router's first-depth children to HiFi so that CL0s works.
1832          */
1833         tb_increase_tmu_accuracy(tunnel);
1834         return true;
1835
1836 err_free:
1837         tb_tunnel_free(tunnel);
1838 err_reclaim_usb:
1839         tb_reclaim_usb3_bandwidth(tb, in, out);
1840 err_detach_group:
1841         tb_detach_bandwidth_group(in);
1842 err_dealloc_dp:
1843         tb_switch_dealloc_dp_resource(in->sw, in);
1844 err_rpm_put:
1845         pm_runtime_mark_last_busy(&out->sw->dev);
1846         pm_runtime_put_autosuspend(&out->sw->dev);
1847         pm_runtime_mark_last_busy(&in->sw->dev);
1848         pm_runtime_put_autosuspend(&in->sw->dev);
1849
1850         return false;
1851 }
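
/*
 * Minimal sketch of the asym_threshold policy referenced above, not the
 * driver's implementation: the real decision is made in tb_configure_asym()
 * and tb_configure_sym() which take the whole path into account. Treating
 * a single direction crossing the threshold as sufficient is an assumption
 * made here for illustration only.
 */
static bool __maybe_unused tb_example_wants_asym(unsigned int consumed_up,
                                                 unsigned int consumed_down)
{
        if (!asym_threshold)
                return false;   /* 0 disables the switching altogether */

        return consumed_up >= asym_threshold ||
               consumed_down >= asym_threshold;
}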
1852
1853 static void tb_tunnel_dp(struct tb *tb)
1854 {
1855         if (!tb_acpi_may_tunnel_dp()) {
1856                 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1857                 return;
1858         }
1859
1860         while (tb_tunnel_one_dp(tb))
1861                 ;
1862 }
1863
1864 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
1865 {
1866         struct tb_port *in, *out;
1867         struct tb_tunnel *tunnel;
1868
1869         if (tb_port_is_dpin(port)) {
1870                 tb_port_dbg(port, "DP IN resource unavailable\n");
1871                 in = port;
1872                 out = NULL;
1873         } else {
1874                 tb_port_dbg(port, "DP OUT resource unavailable\n");
1875                 in = NULL;
1876                 out = port;
1877         }
1878
1879         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
1880         tb_deactivate_and_free_tunnel(tunnel);
1881         list_del_init(&port->list);
1882
1883         /*
1884          * See if there is another DP OUT port that can be used to
1885          * create another tunnel.
1886          */
1887         tb_recalc_estimated_bandwidth(tb);
1888         tb_tunnel_dp(tb);
1889 }
1890
1891 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
1892 {
1893         struct tb_cm *tcm = tb_priv(tb);
1894         struct tb_port *p;
1895
1896         if (tb_port_is_enabled(port))
1897                 return;
1898
1899         list_for_each_entry(p, &tcm->dp_resources, list) {
1900                 if (p == port)
1901                         return;
1902         }
1903
1904         tb_port_dbg(port, "DP %s resource available\n",
1905                     tb_port_is_dpin(port) ? "IN" : "OUT");
1906         list_add_tail(&port->list, &tcm->dp_resources);
1907
1908         /* Look for suitable DP IN <-> DP OUT pairs now */
1909         tb_tunnel_dp(tb);
1910 }
1911
1912 static void tb_disconnect_and_release_dp(struct tb *tb)
1913 {
1914         struct tb_cm *tcm = tb_priv(tb);
1915         struct tb_tunnel *tunnel, *n;
1916
1917         /*
1918          * Tear down all DP tunnels and release their resources. They
1919          * will be re-established after resume based on plug events.
1920          */
1921         list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
1922                 if (tb_tunnel_is_dp(tunnel))
1923                         tb_deactivate_and_free_tunnel(tunnel);
1924         }
1925
1926         while (!list_empty(&tcm->dp_resources)) {
1927                 struct tb_port *port;
1928
1929                 port = list_first_entry(&tcm->dp_resources,
1930                                         struct tb_port, list);
1931                 list_del_init(&port->list);
1932         }
1933 }
1934
1935 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
1936 {
1937         struct tb_tunnel *tunnel;
1938         struct tb_port *up;
1939
1940         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1941         if (WARN_ON(!up))
1942                 return -ENODEV;
1943
1944         tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
1945         if (WARN_ON(!tunnel))
1946                 return -ENODEV;
1947
1948         tb_switch_xhci_disconnect(sw);
1949
1950         tb_tunnel_deactivate(tunnel);
1951         list_del(&tunnel->list);
1952         tb_tunnel_free(tunnel);
1953         return 0;
1954 }
1955
1956 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
1957 {
1958         struct tb_port *up, *down, *port;
1959         struct tb_cm *tcm = tb_priv(tb);
1960         struct tb_tunnel *tunnel;
1961
1962         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1963         if (!up)
1964                 return 0;
1965
1966         /*
1967          * Look up an available PCIe down port. Since we are chaining,
1968          * it should be found right above this switch.
1969          */
1970         port = tb_switch_downstream_port(sw);
1971         down = tb_find_pcie_down(tb_switch_parent(sw), port);
1972         if (!down)
1973                 return 0;
1974
1975         tunnel = tb_tunnel_alloc_pci(tb, up, down);
1976         if (!tunnel)
1977                 return -ENOMEM;
1978
1979         if (tb_tunnel_activate(tunnel)) {
1980                 tb_port_info(up,
1981                              "PCIe tunnel activation failed, aborting\n");
1982                 tb_tunnel_free(tunnel);
1983                 return -EIO;
1984         }
1985
1986         /*
1987          * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
1988          * here.
1989          */
1990         if (tb_switch_pcie_l1_enable(sw))
1991                 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
1992
1993         if (tb_switch_xhci_connect(sw))
1994                 tb_sw_warn(sw, "failed to connect xHCI\n");
1995
1996         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1997         return 0;
1998 }
1999
2000 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2001                                     int transmit_path, int transmit_ring,
2002                                     int receive_path, int receive_ring)
2003 {
2004         struct tb_cm *tcm = tb_priv(tb);
2005         struct tb_port *nhi_port, *dst_port;
2006         struct tb_tunnel *tunnel;
2007         struct tb_switch *sw;
2008         int ret;
2009
2010         sw = tb_to_switch(xd->dev.parent);
2011         dst_port = tb_port_at(xd->route, sw);
2012         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2013
2014         mutex_lock(&tb->lock);
2015
2016         /*
2017          * When tunneling DMA paths the link should not enter CL states
2018          * so disable them now.
2019          */
2020         tb_disable_clx(sw);
2021
2022         tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
2023                                      transmit_ring, receive_path, receive_ring);
2024         if (!tunnel) {
2025                 ret = -ENOMEM;
2026                 goto err_clx;
2027         }
2028
2029         if (tb_tunnel_activate(tunnel)) {
2030                 tb_port_info(nhi_port,
2031                              "DMA tunnel activation failed, aborting\n");
2032                 ret = -EIO;
2033                 goto err_free;
2034         }
2035
2036         list_add_tail(&tunnel->list, &tcm->tunnel_list);
2037         mutex_unlock(&tb->lock);
2038         return 0;
2039
2040 err_free:
2041         tb_tunnel_free(tunnel);
2042 err_clx:
2043         tb_enable_clx(sw);
2044         mutex_unlock(&tb->lock);
2045
2046         return ret;
2047 }
2048
2049 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2050                                           int transmit_path, int transmit_ring,
2051                                           int receive_path, int receive_ring)
2052 {
2053         struct tb_cm *tcm = tb_priv(tb);
2054         struct tb_port *nhi_port, *dst_port;
2055         struct tb_tunnel *tunnel, *n;
2056         struct tb_switch *sw;
2057
2058         sw = tb_to_switch(xd->dev.parent);
2059         dst_port = tb_port_at(xd->route, sw);
2060         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2061
2062         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2063                 if (!tb_tunnel_is_dma(tunnel))
2064                         continue;
2065                 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
2066                         continue;
2067
2068                 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
2069                                         receive_path, receive_ring))
2070                         tb_deactivate_and_free_tunnel(tunnel);
2071         }
2072
2073         /*
2074          * Try to re-enable CL states now. It is OK if this fails
2075          * because we may still have another DMA tunnel active through
2076          * the same host router USB4 downstream port.
2077          */
2078         tb_enable_clx(sw);
2079 }
2080
2081 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2082                                        int transmit_path, int transmit_ring,
2083                                        int receive_path, int receive_ring)
2084 {
2085         if (!xd->is_unplugged) {
2086                 mutex_lock(&tb->lock);
2087                 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
2088                                               transmit_ring, receive_path,
2089                                               receive_ring);
2090                 mutex_unlock(&tb->lock);
2091         }
2092         return 0;
2093 }
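
/*
 * Illustrative sketch, not the driver's tb_tunnel_match_dma(): the hotplug
 * path later in this file tears down all DMA tunnels to an XDomain by
 * passing -1 for every path and ring, which is assumed here to act as a
 * "match anything" wildcard for each parameter.
 */
static bool __maybe_unused tb_example_dma_param_matches(int wanted, int actual)
{
        return wanted < 0 || wanted == actual;
}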
2094
2095 /* hotplug handling */
2096
2097 /*
2098  * tb_handle_hotplug() - handle hotplug event
2099  *
2100  * Executes on tb->wq.
2101  */
2102 static void tb_handle_hotplug(struct work_struct *work)
2103 {
2104         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
2105         struct tb *tb = ev->tb;
2106         struct tb_cm *tcm = tb_priv(tb);
2107         struct tb_switch *sw;
2108         struct tb_port *port;
2109
2110         /* Bring the domain back from sleep if it was suspended */
2111         pm_runtime_get_sync(&tb->dev);
2112
2113         mutex_lock(&tb->lock);
2114         if (!tcm->hotplug_active)
2115                 goto out; /* during init, suspend or shutdown */
2116
2117         sw = tb_switch_find_by_route(tb, ev->route);
2118         if (!sw) {
2119                 tb_warn(tb,
2120                         "hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
2121                         ev->route, ev->port, ev->unplug);
2122                 goto out;
2123         }
2124         if (ev->port > sw->config.max_port_number) {
2125                 tb_warn(tb,
2126                         "hotplug event from non-existent port %llx:%x (unplug: %d)\n",
2127                         ev->route, ev->port, ev->unplug);
2128                 goto put_sw;
2129         }
2130         port = &sw->ports[ev->port];
2131         if (tb_is_upstream_port(port)) {
2132                 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
2133                        ev->route, ev->port, ev->unplug);
2134                 goto put_sw;
2135         }
2136
2137         pm_runtime_get_sync(&sw->dev);
2138
2139         if (ev->unplug) {
2140                 tb_retimer_remove_all(port);
2141
2142                 if (tb_port_has_remote(port)) {
2143                         tb_port_dbg(port, "switch unplugged\n");
2144                         tb_sw_set_unplugged(port->remote->sw);
2145                         tb_free_invalid_tunnels(tb);
2146                         tb_remove_dp_resources(port->remote->sw);
2147                         tb_switch_tmu_disable(port->remote->sw);
2148                         tb_switch_unconfigure_link(port->remote->sw);
2149                         tb_switch_set_link_width(port->remote->sw,
2150                                                  TB_LINK_WIDTH_SINGLE);
2151                         tb_switch_remove(port->remote->sw);
2152                         port->remote = NULL;
2153                         if (port->dual_link_port)
2154                                 port->dual_link_port->remote = NULL;
2155                         /* Maybe we can create another DP tunnel */
2156                         tb_recalc_estimated_bandwidth(tb);
2157                         tb_tunnel_dp(tb);
2158                 } else if (port->xdomain) {
2159                         struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
2160
2161                         tb_port_dbg(port, "xdomain unplugged\n");
2162                         /*
2163                          * Service drivers are unbound during
2164                          * tb_xdomain_remove() so setting XDomain as
2165                          * unplugged here prevents deadlock if they call
2166                          * tb_xdomain_disable_paths(). We will tear down
2167                          * all the tunnels below.
2168                          */
2169                         xd->is_unplugged = true;
2170                         tb_xdomain_remove(xd);
2171                         port->xdomain = NULL;
2172                         __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
2173                         tb_xdomain_put(xd);
2174                         tb_port_unconfigure_xdomain(port);
2175                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2176                         tb_dp_resource_unavailable(tb, port);
2177                 } else if (!port->port) {
2178                         tb_sw_dbg(sw, "xHCI disconnect request\n");
2179                         tb_switch_xhci_disconnect(sw);
2180                 } else {
2181                         tb_port_dbg(port,
2182                                    "got unplug event for disconnected port, ignoring\n");
2183                 }
2184         } else if (port->remote) {
2185                 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
2186         } else if (!port->port && sw->authorized) {
2187                 tb_sw_dbg(sw, "xHCI connect request\n");
2188                 tb_switch_xhci_connect(sw);
2189         } else {
2190                 if (tb_port_is_null(port)) {
2191                         tb_port_dbg(port, "hotplug: scanning\n");
2192                         tb_scan_port(port);
2193                         if (!port->remote)
2194                                 tb_port_dbg(port, "hotplug: no switch found\n");
2195                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2196                         tb_dp_resource_available(tb, port);
2197                 }
2198         }
2199
2200         pm_runtime_mark_last_busy(&sw->dev);
2201         pm_runtime_put_autosuspend(&sw->dev);
2202
2203 put_sw:
2204         tb_switch_put(sw);
2205 out:
2206         mutex_unlock(&tb->lock);
2207
2208         pm_runtime_mark_last_busy(&tb->dev);
2209         pm_runtime_put_autosuspend(&tb->dev);
2210
2211         kfree(ev);
2212 }
2213
2214 static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
2215                                  int *requested_down)
2216 {
2217         int allocated_up, allocated_down, available_up, available_down, ret;
2218         int requested_up_corrected, requested_down_corrected, granularity;
2219         int max_up, max_down, max_up_rounded, max_down_rounded;
2220         struct tb *tb = tunnel->tb;
2221         struct tb_port *in, *out;
2222
2223         ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
2224         if (ret)
2225                 return ret;
2226
2227         in = tunnel->src_port;
2228         out = tunnel->dst_port;
2229
2230         tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n",
2231                       allocated_up, allocated_down);
2232
2233         /*
2234          * If we get a rounded-up request from the graphics side, say
2235          * HBR2 x 4 which is 17500 instead of 17280 (because of the
2236          * granularity), we allow it too. Here the graphics side has
2237          * already negotiated the maximum possible rates with the DPRX
2238          * (17280 in this case).
2239          *
2240          * Since the link cannot go higher than 17280 we use that in our
2241          * calculations, but the DP IN adapter Allocated BW write must be
2242          * the same value (17500), otherwise the adapter will mark it as
2243          * failed for graphics.
2244          */
2245         ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
2246         if (ret)
2247                 return ret;
2248
2249         ret = usb4_dp_port_granularity(in);
2250         if (ret < 0)
2251                 return ret;
2252         granularity = ret;
2253
2254         max_up_rounded = roundup(max_up, granularity);
2255         max_down_rounded = roundup(max_down, granularity);
2256
2257         /*
2258          * This will "fix" the request down to the maximum supported
2259          * rate * lanes if it is at the maximum rounded up level.
2260          */
2261         requested_up_corrected = *requested_up;
2262         if (requested_up_corrected == max_up_rounded)
2263                 requested_up_corrected = max_up;
2264         else if (requested_up_corrected < 0)
2265                 requested_up_corrected = 0;
2266         requested_down_corrected = *requested_down;
2267         if (requested_down_corrected == max_down_rounded)
2268                 requested_down_corrected = max_down;
2269         else if (requested_down_corrected < 0)
2270                 requested_down_corrected = 0;
2271
2272         tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n",
2273                       requested_up_corrected, requested_down_corrected);
2274
2275         if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
2276             (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
2277                 tb_tunnel_dbg(tunnel,
2278                               "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
2279                               requested_up_corrected, requested_down_corrected,
2280                               max_up_rounded, max_down_rounded);
2281                 return -ENOBUFS;
2282         }
2283
2284         if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
2285             (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
2286                 /*
2287                  * If bandwidth on a link is < asym_threshold transition
2288                  * the link to symmetric.
2289                  */
2290                 tb_configure_sym(tb, in, out, *requested_up, *requested_down);
2291                 /*
2292                  * If the requested bandwidth is less than or equal to
2293                  * what is currently allocated to the tunnel we simply
2294                  * change the reservation of the tunnel. Since all the
2295                  * tunnels going out from the same USB4 port are in the
2296                  * same group, the released bandwidth will be taken into
2297                  * account for the other tunnels automatically below.
2298                  */
2299                 return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2300                                                  requested_down);
2301         }
2302
2303         /*
2304          * More bandwidth is requested. Release all the potential
2305          * bandwidth from USB3 first.
2306          */
2307         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
2308         if (ret)
2309                 return ret;
2310
2311         /*
2312          * Then go over all tunnels that cross the same USB4 ports (they
2313          * are also in the same group but we use the same function here
2314          * that we use with the normal bandwidth allocation).
2315          */
2316         ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
2317                                      true);
2318         if (ret)
2319                 goto reclaim;
2320
2321         tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d Mb/s\n",
2322                       available_up, available_down);
2323
2324         if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
2325             (*requested_down >= 0 && available_down >= requested_down_corrected)) {
2326                 /*
2327                  * If bandwidth on a link is >= asym_threshold
2328                  * transition the link to asymmetric.
2329                  */
2330                 ret = tb_configure_asym(tb, in, out, *requested_up,
2331                                         *requested_down);
2332                 if (ret) {
2333                         tb_configure_sym(tb, in, out, 0, 0);
2334                         return ret;
2335                 }
2336
2337                 ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2338                                                 requested_down);
2339                 if (ret) {
2340                         tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
2341                         tb_configure_sym(tb, in, out, 0, 0);
2342                 }
2343         } else {
2344                 ret = -ENOBUFS;
2345         }
2346
2347 reclaim:
2348         tb_reclaim_usb3_bandwidth(tb, in, out);
2349         return ret;
2350 }
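
/*
 * Illustrative sketch, not part of the driver: the rounding correction
 * described in tb_alloc_dp_bandwidth() above for the HBR2 x 4 case. The
 * 250 Mb/s granularity is an assumption, chosen because it makes 17280
 * round up to the 17500 mentioned in the comment.
 */
static int __maybe_unused tb_example_correct_request(int requested, int max,
                                                     int granularity)
{
        /* e.g. requested = 17500, max = 17280, granularity = 250 */
        if (requested == roundup(max, granularity))
                return max;     /* treat the rounded-up request as the link maximum */
        return requested;
}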
2351
2352 static void tb_handle_dp_bandwidth_request(struct work_struct *work)
2353 {
2354         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
2355         int requested_bw, requested_up, requested_down, ret;
2356         struct tb_port *in, *out;
2357         struct tb_tunnel *tunnel;
2358         struct tb *tb = ev->tb;
2359         struct tb_cm *tcm = tb_priv(tb);
2360         struct tb_switch *sw;
2361
2362         pm_runtime_get_sync(&tb->dev);
2363
2364         mutex_lock(&tb->lock);
2365         if (!tcm->hotplug_active)
2366                 goto unlock;
2367
2368         sw = tb_switch_find_by_route(tb, ev->route);
2369         if (!sw) {
2370                 tb_warn(tb, "bandwidth request from non-existent router %llx\n",
2371                         ev->route);
2372                 goto unlock;
2373         }
2374
2375         in = &sw->ports[ev->port];
2376         if (!tb_port_is_dpin(in)) {
2377                 tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
2378                 goto put_sw;
2379         }
2380
2381         tb_port_dbg(in, "handling bandwidth allocation request\n");
2382
2383         if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
2384                 tb_port_warn(in, "bandwidth allocation mode not enabled\n");
2385                 goto put_sw;
2386         }
2387
2388         ret = usb4_dp_port_requested_bandwidth(in);
2389         if (ret < 0) {
2390                 if (ret == -ENODATA)
2391                         tb_port_dbg(in, "no bandwidth request active\n");
2392                 else
2393                         tb_port_warn(in, "failed to read requested bandwidth\n");
2394                 goto put_sw;
2395         }
2396         requested_bw = ret;
2397
2398         tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
2399
2400         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
2401         if (!tunnel) {
2402                 tb_port_warn(in, "failed to find tunnel\n");
2403                 goto put_sw;
2404         }
2405
2406         out = tunnel->dst_port;
2407
2408         if (tb_port_path_direction_downstream(in, out)) {
2409                 requested_up = -1;
2410                 requested_down = requested_bw;
2411         } else {
2412                 requested_up = requested_bw;
2413                 requested_down = -1;
2414         }
2415
2416         ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
2417         if (ret) {
2418                 if (ret == -ENOBUFS)
2419                         tb_tunnel_warn(tunnel,
2420                                        "not enough bandwidth available\n");
2421                 else
2422                         tb_tunnel_warn(tunnel,
2423                                        "failed to change bandwidth allocation\n");
2424         } else {
2425                 tb_tunnel_dbg(tunnel,
2426                               "bandwidth allocation changed to %d/%d Mb/s\n",
2427                               requested_up, requested_down);
2428
2429                 /* Update other clients about the allocation change */
2430                 tb_recalc_estimated_bandwidth(tb);
2431         }
2432
2433 put_sw:
2434         tb_switch_put(sw);
2435 unlock:
2436         mutex_unlock(&tb->lock);
2437
2438         pm_runtime_mark_last_busy(&tb->dev);
2439         pm_runtime_put_autosuspend(&tb->dev);
2440
2441         kfree(ev);
2442 }
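
/*
 * Illustrative sketch, not part of the driver: how the single bandwidth
 * value read from the DP IN adapter above maps to the per-direction pair
 * that tb_alloc_dp_bandwidth() expects. A negative value marks the
 * direction that is not being requested.
 */
static void __maybe_unused tb_example_split_request(bool downstream, int bw,
                                                    int *up, int *down)
{
        *up = downstream ? -1 : bw;
        *down = downstream ? bw : -1;
}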
2443
2444 static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
2445 {
2446         struct tb_hotplug_event *ev;
2447
2448         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
2449         if (!ev)
2450                 return;
2451
2452         ev->tb = tb;
2453         ev->route = route;
2454         ev->port = port;
2455         INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
2456         queue_work(tb->wq, &ev->work);
2457 }
2458
2459 static void tb_handle_notification(struct tb *tb, u64 route,
2460                                    const struct cfg_error_pkg *error)
2461 {
2462
2463         switch (error->error) {
2464         case TB_CFG_ERROR_PCIE_WAKE:
2465         case TB_CFG_ERROR_DP_CON_CHANGE:
2466         case TB_CFG_ERROR_DPTX_DISCOVERY:
2467                 if (tb_cfg_ack_notification(tb->ctl, route, error))
2468                         tb_warn(tb, "could not ack notification on %llx\n",
2469                                 route);
2470                 break;
2471
2472         case TB_CFG_ERROR_DP_BW:
2473                 if (tb_cfg_ack_notification(tb->ctl, route, error))
2474                         tb_warn(tb, "could not ack notification on %llx\n",
2475                                 route);
2476                 tb_queue_dp_bandwidth_request(tb, route, error->port);
2477                 break;
2478
2479         default:
2480                 /* Ignore for now */
2481                 break;
2482         }
2483 }
2484
2485 /*
2486  * tb_schedule_hotplug_handler() - callback function for the control channel
2487  *
2488  * Delegates to tb_handle_hotplug.
2489  */
2490 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
2491                             const void *buf, size_t size)
2492 {
2493         const struct cfg_event_pkg *pkg = buf;
2494         u64 route = tb_cfg_get_route(&pkg->header);
2495
2496         switch (type) {
2497         case TB_CFG_PKG_ERROR:
2498                 tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
2499                 return;
2500         case TB_CFG_PKG_EVENT:
2501                 break;
2502         default:
2503                 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
2504                 return;
2505         }
2506
2507         if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
2508                 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
2509                         pkg->port);
2510         }
2511
2512         tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
2513 }
2514
2515 static void tb_stop(struct tb *tb)
2516 {
2517         struct tb_cm *tcm = tb_priv(tb);
2518         struct tb_tunnel *tunnel;
2519         struct tb_tunnel *n;
2520
2521         cancel_delayed_work(&tcm->remove_work);
2522         /* tunnels are only present after everything has been initialized */
2523         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2524                 /*
2525                  * DMA tunnels require the driver to be functional so we
2526                  * tear them down. Other protocol tunnels can be left
2527                  * intact.
2528                  */
2529                 if (tb_tunnel_is_dma(tunnel))
2530                         tb_tunnel_deactivate(tunnel);
2531                 tb_tunnel_free(tunnel);
2532         }
2533         tb_switch_remove(tb->root_switch);
2534         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2535 }
2536
2537 static int tb_scan_finalize_switch(struct device *dev, void *data)
2538 {
2539         if (tb_is_switch(dev)) {
2540                 struct tb_switch *sw = tb_to_switch(dev);
2541
2542                 /*
2543                  * If we found that the switch was already set up by the
2544                  * boot firmware, mark it as authorized now before we
2545                  * send the uevent to userspace.
2546                  */
2547                 if (sw->boot)
2548                         sw->authorized = 1;
2549
2550                 dev_set_uevent_suppress(dev, false);
2551                 kobject_uevent(&dev->kobj, KOBJ_ADD);
2552                 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
2553         }
2554
2555         return 0;
2556 }
2557
2558 static int tb_start(struct tb *tb)
2559 {
2560         struct tb_cm *tcm = tb_priv(tb);
2561         int ret;
2562
2563         tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
2564         if (IS_ERR(tb->root_switch))
2565                 return PTR_ERR(tb->root_switch);
2566
2567         /*
2568          * ICM firmware upgrade needs running ICM firmware, which is not
2569          * available in native mode, so disable firmware upgrade of the
2570          * root switch.
2571          *
2572          * However, USB4 routers support NVM firmware upgrade if they
2573          * implement the necessary router operations.
2574          */
2575         tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
2576         /* All USB4 routers support runtime PM */
2577         tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
2578
2579         ret = tb_switch_configure(tb->root_switch);
2580         if (ret) {
2581                 tb_switch_put(tb->root_switch);
2582                 return ret;
2583         }
2584
2585         /* Announce the switch to the world */
2586         ret = tb_switch_add(tb->root_switch);
2587         if (ret) {
2588                 tb_switch_put(tb->root_switch);
2589                 return ret;
2590         }
2591
2592         /*
2593          * To support highest CLx state, we set host router's TMU to
2594          * Normal mode.
2595          */
2596         tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
2597         /* Enable TMU if it is off */
2598         tb_switch_tmu_enable(tb->root_switch);
2599         /* Full scan to discover devices added before the driver was loaded. */
2600         tb_scan_switch(tb->root_switch);
2601         /* Find out tunnels created by the boot firmware */
2602         tb_discover_tunnels(tb);
2603         /* Add DP resources from the DP tunnels created by the boot firmware */
2604         tb_discover_dp_resources(tb);
2605         /*
2606          * If the boot firmware did not create USB 3.x tunnels, create
2607          * them now for the whole topology.
2608          */
2609         tb_create_usb3_tunnels(tb->root_switch);
2610         /* Add DP IN resources for the root switch */
2611         tb_add_dp_resources(tb->root_switch);
2612         /* Make the discovered switches available to the userspace */
2613         device_for_each_child(&tb->root_switch->dev, NULL,
2614                               tb_scan_finalize_switch);
2615
2616         /* Allow tb_handle_hotplug to progress events */
2617         tcm->hotplug_active = true;
2618         return 0;
2619 }
2620
2621 static int tb_suspend_noirq(struct tb *tb)
2622 {
2623         struct tb_cm *tcm = tb_priv(tb);
2624
2625         tb_dbg(tb, "suspending...\n");
2626         tb_disconnect_and_release_dp(tb);
2627         tb_switch_suspend(tb->root_switch, false);
2628         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2629         tb_dbg(tb, "suspend finished\n");
2630
2631         return 0;
2632 }
2633
2634 static void tb_restore_children(struct tb_switch *sw)
2635 {
2636         struct tb_port *port;
2637
2638         /* No need to restore if the router is already unplugged */
2639         if (sw->is_unplugged)
2640                 return;
2641
2642         if (tb_enable_clx(sw))
2643                 tb_sw_warn(sw, "failed to re-enable CL states\n");
2644
2645         if (tb_enable_tmu(sw))
2646                 tb_sw_warn(sw, "failed to restore TMU configuration\n");
2647
2648         tb_switch_configuration_valid(sw);
2649
2650         tb_switch_for_each_port(sw, port) {
2651                 if (!tb_port_has_remote(port) && !port->xdomain)
2652                         continue;
2653
2654                 if (port->remote) {
2655                         tb_switch_set_link_width(port->remote->sw,
2656                                                  port->remote->sw->link_width);
2657                         tb_switch_configure_link(port->remote->sw);
2658
2659                         tb_restore_children(port->remote->sw);
2660                 } else if (port->xdomain) {
2661                         tb_port_configure_xdomain(port, port->xdomain);
2662                 }
2663         }
2664 }
2665
2666 static int tb_resume_noirq(struct tb *tb)
2667 {
2668         struct tb_cm *tcm = tb_priv(tb);
2669         struct tb_tunnel *tunnel, *n;
2670         unsigned int usb3_delay = 0;
2671         LIST_HEAD(tunnels);
2672
2673         tb_dbg(tb, "resuming...\n");
2674
2675         /* Remove any PCI devices the firmware might have set up */
2676         tb_switch_reset(tb->root_switch);
2677
2678         tb_switch_resume(tb->root_switch);
2679         tb_free_invalid_tunnels(tb);
2680         tb_free_unplugged_children(tb->root_switch);
2681         tb_restore_children(tb->root_switch);
2682
2683         /*
2684          * If we get here from suspend to disk, the boot firmware or the
2685          * restore kernel might have created tunnels of their own. Since
2686          * we cannot be sure they are usable for us, we find and tear
2687          * them down.
2688          */
2689         tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
2690         list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
2691                 if (tb_tunnel_is_usb3(tunnel))
2692                         usb3_delay = 500;
2693                 tb_tunnel_deactivate(tunnel);
2694                 tb_tunnel_free(tunnel);
2695         }
2696
2697         /* Re-create our tunnels now */
2698         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2699                 /* USB3 requires delay before it can be re-activated */
2700                 if (tb_tunnel_is_usb3(tunnel)) {
2701                         msleep(usb3_delay);
2702                         /* Only need to do it once */
2703                         usb3_delay = 0;
2704                 }
2705                 tb_tunnel_restart(tunnel);
2706         }
2707         if (!list_empty(&tcm->tunnel_list)) {
2708                 /*
2709                 /*
2710                  * The PCIe links need some time to come back up;
2711                  * empirically 100ms is enough.
2712                  */
2713                 msleep(100);
2714         }
2715         /* Allow tb_handle_hotplug to progress events */
2716         tcm->hotplug_active = true;
2717         tb_dbg(tb, "resume finished\n");
2718
2719         return 0;
2720 }
2721
2722 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
2723 {
2724         struct tb_port *port;
2725         int ret = 0;
2726
2727         tb_switch_for_each_port(sw, port) {
2728                 if (tb_is_upstream_port(port))
2729                         continue;
2730                 if (port->xdomain && port->xdomain->is_unplugged) {
2731                         tb_retimer_remove_all(port);
2732                         tb_xdomain_remove(port->xdomain);
2733                         tb_port_unconfigure_xdomain(port);
2734                         port->xdomain = NULL;
2735                         ret++;
2736                 } else if (port->remote) {
2737                         ret += tb_free_unplugged_xdomains(port->remote->sw);
2738                 }
2739         }
2740
2741         return ret;
2742 }
2743
2744 static int tb_freeze_noirq(struct tb *tb)
2745 {
2746         struct tb_cm *tcm = tb_priv(tb);
2747
2748         tcm->hotplug_active = false;
2749         return 0;
2750 }
2751
2752 static int tb_thaw_noirq(struct tb *tb)
2753 {
2754         struct tb_cm *tcm = tb_priv(tb);
2755
2756         tcm->hotplug_active = true;
2757         return 0;
2758 }
2759
2760 static void tb_complete(struct tb *tb)
2761 {
2762         /*
2763          * Release any unplugged XDomains. If another domain has been
2764          * swapped in place of an unplugged XDomain we need to run
2765          * another rescan, so do that here as well.
2766          */
2767         mutex_lock(&tb->lock);
2768         if (tb_free_unplugged_xdomains(tb->root_switch))
2769                 tb_scan_switch(tb->root_switch);
2770         mutex_unlock(&tb->lock);
2771 }
2772
2773 static int tb_runtime_suspend(struct tb *tb)
2774 {
2775         struct tb_cm *tcm = tb_priv(tb);
2776
2777         mutex_lock(&tb->lock);
2778         tb_switch_suspend(tb->root_switch, true);
2779         tcm->hotplug_active = false;
2780         mutex_unlock(&tb->lock);
2781
2782         return 0;
2783 }
2784
2785 static void tb_remove_work(struct work_struct *work)
2786 {
2787         struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
2788         struct tb *tb = tcm_to_tb(tcm);
2789
2790         mutex_lock(&tb->lock);
2791         if (tb->root_switch) {
2792                 tb_free_unplugged_children(tb->root_switch);
2793                 tb_free_unplugged_xdomains(tb->root_switch);
2794         }
2795         mutex_unlock(&tb->lock);
2796 }
2797
2798 static int tb_runtime_resume(struct tb *tb)
2799 {
2800         struct tb_cm *tcm = tb_priv(tb);
2801         struct tb_tunnel *tunnel, *n;
2802
2803         mutex_lock(&tb->lock);
2804         tb_switch_resume(tb->root_switch);
2805         tb_free_invalid_tunnels(tb);
2806         tb_restore_children(tb->root_switch);
2807         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
2808                 tb_tunnel_restart(tunnel);
2809         tcm->hotplug_active = true;
2810         mutex_unlock(&tb->lock);
2811
2812         /*
2813          * Schedule cleanup of any unplugged devices. Run this in a
2814          * separate thread to avoid possible deadlock if the device
2815          * removal runtime resumes the unplugged device.
2816          */
2817         queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
2818         return 0;
2819 }
2820
2821 static const struct tb_cm_ops tb_cm_ops = {
2822         .start = tb_start,
2823         .stop = tb_stop,
2824         .suspend_noirq = tb_suspend_noirq,
2825         .resume_noirq = tb_resume_noirq,
2826         .freeze_noirq = tb_freeze_noirq,
2827         .thaw_noirq = tb_thaw_noirq,
2828         .complete = tb_complete,
2829         .runtime_suspend = tb_runtime_suspend,
2830         .runtime_resume = tb_runtime_resume,
2831         .handle_event = tb_handle_event,
2832         .disapprove_switch = tb_disconnect_pci,
2833         .approve_switch = tb_tunnel_pci,
2834         .approve_xdomain_paths = tb_approve_xdomain_paths,
2835         .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
2836 };
2837
2838 /*
2839  * During suspend the Thunderbolt controller is reset and all PCIe
2840  * tunnels are lost. The NHI driver will try to reestablish all tunnels
2841  * during resume. This adds device links between the tunneled PCIe
2842  * downstream ports and the NHI so that the device core makes sure the
2843  * NHI is resumed before the rest.
2844  */
2845 static bool tb_apple_add_links(struct tb_nhi *nhi)
2846 {
2847         struct pci_dev *upstream, *pdev;
2848         bool ret;
2849
2850         if (!x86_apple_machine)
2851                 return false;
2852
2853         switch (nhi->pdev->device) {
2854         case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2855         case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2856         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2857         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2858                 break;
2859         default:
2860                 return false;
2861         }
2862
2863         upstream = pci_upstream_bridge(nhi->pdev);
2864         while (upstream) {
2865                 if (!pci_is_pcie(upstream))
2866                         return false;
2867                 if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
2868                         break;
2869                 upstream = pci_upstream_bridge(upstream);
2870         }
2871
2872         if (!upstream)
2873                 return false;
2874
2875         /*
2876          * For each hotplug downstream port, add a device link back
2877          * to the NHI so that PCIe tunnels can be re-established after
2878          * sleep.
2879          */
2880         ret = false;
2881         for_each_pci_bridge(pdev, upstream->subordinate) {
2882                 const struct device_link *link;
2883
2884                 if (!pci_is_pcie(pdev))
2885                         continue;
2886                 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
2887                     !pdev->is_hotplug_bridge)
2888                         continue;
2889
2890                 link = device_link_add(&pdev->dev, &nhi->pdev->dev,
2891                                        DL_FLAG_AUTOREMOVE_SUPPLIER |
2892                                        DL_FLAG_PM_RUNTIME);
2893                 if (link) {
2894                         dev_dbg(&nhi->pdev->dev, "created link from %s\n",
2895                                 dev_name(&pdev->dev));
2896                         ret = true;
2897                 } else {
2898                         dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
2899                                  dev_name(&pdev->dev));
2900                 }
2901         }
2902
2903         return ret;
2904 }
2905
2906 struct tb *tb_probe(struct tb_nhi *nhi)
2907 {
2908         struct tb_cm *tcm;
2909         struct tb *tb;
2910
2911         tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
2912         if (!tb)
2913                 return NULL;
2914
2915         if (tb_acpi_may_tunnel_pcie())
2916                 tb->security_level = TB_SECURITY_USER;
2917         else
2918                 tb->security_level = TB_SECURITY_NOPCIE;
2919
2920         tb->cm_ops = &tb_cm_ops;
2921
2922         tcm = tb_priv(tb);
2923         INIT_LIST_HEAD(&tcm->tunnel_list);
2924         INIT_LIST_HEAD(&tcm->dp_resources);
2925         INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
2926         tb_init_bandwidth_groups(tcm);
2927
2928         tb_dbg(tb, "using software connection manager\n");
2929
2930         /*
2931          * Device links are needed to make sure we establish tunnels
2932          * before the PCIe/USB stack is resumed, so complain here if we
2933          * find them missing.
2934          */
2935         if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
2936                 tb_warn(tb, "device links to tunneled native ports are missing!\n");
2937
2938         return tb;
2939 }