drivers/thunderbolt/tb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - bus logic (NHI independent)
4  *
5  * Copyright (c) 2014 Andreas Noever <[email protected]>
6  * Copyright (C) 2019, Intel Corporation
7  */
8
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/platform_data/x86/apple.h>
14
15 #include "tb.h"
16 #include "tb_regs.h"
17 #include "tunnel.h"
18
19 #define TB_TIMEOUT              100     /* ms */
20 #define TB_RELEASE_BW_TIMEOUT   10000   /* ms */
21
22 /*
23  * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
24  * direction. This is 40G - 10% guard band bandwidth.
25  */
26 #define TB_ASYM_MIN             (40000 * 90 / 100)
27
28 /*
29  * Threshold bandwidth (in Mb/s) that is used to switch the links to
30  * asymmetric and back. This is selected as 45G which means when the
31  * request is higher than this, we switch the link to asymmetric, and
32  * when it is less than this we switch it back. The 45G is selected so
33  * that we still have 27G (of the total 72G) for bulk PCIe traffic when
34  * switching back to symmetric.
35  */
36 #define TB_ASYM_THRESHOLD       45000
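
/*
 * Worked example of the figures above (Gen 4 link): a symmetric link
 * carries 2 x 40G = 80G in each direction, or 72G after the 10% guard
 * band. With the threshold at 45G, switching back to symmetric still
 * leaves 72G - 45G = 27G for bulk PCIe traffic, while TB_ASYM_MIN
 * (36G) is the single-lane direction of an asymmetric link (40G minus
 * the guard band).
 */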
37
38 #define MAX_GROUPS              7       /* max Group_ID is 7 */
39
40 static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
41 module_param_named(asym_threshold, asym_threshold, uint, 0444);
42 MODULE_PARM_DESC(asym_threshold,
43                 "threshold (Mb/s) when to switch Gen 4 link symmetry. 0 disables. (default: "
44                 __MODULE_STRING(TB_ASYM_THRESHOLD) ")");
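
/*
 * For example, assuming the driver is built as the usual "thunderbolt"
 * module, asymmetric transitions can be disabled at load time with
 * something like:
 *
 *	modprobe thunderbolt asym_threshold=0
 *
 * or with "thunderbolt.asym_threshold=0" on the kernel command line.
 * The parameter is read-only at runtime (mode 0444).
 */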
45
46 /**
47  * struct tb_cm - Simple Thunderbolt connection manager
48  * @tunnel_list: List of active tunnels
49  * @dp_resources: List of available DP resources for DP tunneling
50  * @hotplug_active: tb_handle_hotplug will stop processing plug
51  *                  events and exit if this is not set (it needs to
52  *                  acquire the lock one more time). Used to drain wq
53  *                  after cfg has been paused.
54  * @remove_work: Work used to remove any unplugged routers after
55  *               runtime resume
56  * @groups: Bandwidth groups used in this domain.
57  */
58 struct tb_cm {
59         struct list_head tunnel_list;
60         struct list_head dp_resources;
61         bool hotplug_active;
62         struct delayed_work remove_work;
63         struct tb_bandwidth_group groups[MAX_GROUPS];
64 };
65
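/*
 * Note that tcm_to_tb() relies on the connection manager private data
 * being allocated immediately after struct tb (the same layout that
 * tb_priv() assumes), so subtracting sizeof(struct tb) from the private
 * data pointer recovers the domain structure. This is an assumption
 * about the domain allocation layout rather than something enforced here.
 */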
66 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
67 {
68         return ((void *)tcm - sizeof(struct tb));
69 }
70
71 struct tb_hotplug_event {
72         struct work_struct work;
73         struct tb *tb;
74         u64 route;
75         u8 port;
76         bool unplug;
77 };
78
79 static void tb_handle_hotplug(struct work_struct *work);
80
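/*
 * Queues a hotplug event for processing in the domain workqueue. The
 * allocated event is handed over to tb_handle_hotplug(), which is
 * expected to free it once the event has been handled (the handler is
 * defined later in this file).
 */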
81 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
82 {
83         struct tb_hotplug_event *ev;
84
85         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
86         if (!ev)
87                 return;
88
89         ev->tb = tb;
90         ev->route = route;
91         ev->port = port;
92         ev->unplug = unplug;
93         INIT_WORK(&ev->work, tb_handle_hotplug);
94         queue_work(tb->wq, &ev->work);
95 }
96
97 /* enumeration & hot plug handling */
98
99 static void tb_add_dp_resources(struct tb_switch *sw)
100 {
101         struct tb_cm *tcm = tb_priv(sw->tb);
102         struct tb_port *port;
103
104         tb_switch_for_each_port(sw, port) {
105                 if (!tb_port_is_dpin(port))
106                         continue;
107
108                 if (!tb_switch_query_dp_resource(sw, port))
109                         continue;
110
111                 /*
112                  * If a DP IN on a device router exists, position it at the
113                  * beginning of the DP resources list, so that it is used
114                  * before DP IN of the host router. This way external GPU(s)
115                  * will be prioritized when pairing DP IN to a DP OUT.
116                  */
117                 if (tb_route(sw))
118                         list_add(&port->list, &tcm->dp_resources);
119                 else
120                         list_add_tail(&port->list, &tcm->dp_resources);
121
122                 tb_port_dbg(port, "DP IN resource available\n");
123         }
124 }
125
126 static void tb_remove_dp_resources(struct tb_switch *sw)
127 {
128         struct tb_cm *tcm = tb_priv(sw->tb);
129         struct tb_port *port, *tmp;
130
131         /* Clear children resources first */
132         tb_switch_for_each_port(sw, port) {
133                 if (tb_port_has_remote(port))
134                         tb_remove_dp_resources(port->remote->sw);
135         }
136
137         list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
138                 if (port->sw == sw) {
139                         tb_port_dbg(port, "DP OUT resource unavailable\n");
140                         list_del_init(&port->list);
141                 }
142         }
143 }
144
145 static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
146 {
147         struct tb_cm *tcm = tb_priv(tb);
148         struct tb_port *p;
149
150         list_for_each_entry(p, &tcm->dp_resources, list) {
151                 if (p == port)
152                         return;
153         }
154
155         tb_port_dbg(port, "DP %s resource available discovered\n",
156                     tb_port_is_dpin(port) ? "IN" : "OUT");
157         list_add_tail(&port->list, &tcm->dp_resources);
158 }
159
160 static void tb_discover_dp_resources(struct tb *tb)
161 {
162         struct tb_cm *tcm = tb_priv(tb);
163         struct tb_tunnel *tunnel;
164
165         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
166                 if (tb_tunnel_is_dp(tunnel))
167                         tb_discover_dp_resource(tb, tunnel->dst_port);
168         }
169 }
170
171 /* Enables CL states up to host router */
172 static int tb_enable_clx(struct tb_switch *sw)
173 {
174         struct tb_cm *tcm = tb_priv(sw->tb);
175         unsigned int clx = TB_CL0S | TB_CL1;
176         const struct tb_tunnel *tunnel;
177         int ret;
178
179         /*
180          * Currently only enable CLx for the first link. This is enough
181          * to allow the CPU to save energy at least on Intel hardware
182          * and makes it slightly simpler to implement. We may change
183          * this in the future to cover the whole topology if it turns
184          * out to be beneficial.
185          */
186         while (sw && tb_switch_depth(sw) > 1)
187                 sw = tb_switch_parent(sw);
188
189         if (!sw)
190                 return 0;
191
192         if (tb_switch_depth(sw) != 1)
193                 return 0;
194
195         /*
196          * If we are re-enabling then check if there is an active DMA
197          * tunnel and in that case bail out.
198          */
199         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
200                 if (tb_tunnel_is_dma(tunnel)) {
201                         if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
202                                 return 0;
203                 }
204         }
205
206         /*
207          * Initially try with CL2. If that's not supported by the
208          * topology try with CL0s and CL1 and then give up.
209          */
210         ret = tb_switch_clx_enable(sw, clx | TB_CL2);
211         if (ret == -EOPNOTSUPP)
212                 ret = tb_switch_clx_enable(sw, clx);
213         return ret == -EOPNOTSUPP ? 0 : ret;
214 }
215
216 /**
217  * tb_disable_clx() - Disable CL states up to host router
218  * @sw: Router to start
219  *
220  * Disables CL states from @sw up to the host router. Returns true if
221  * any CL states were disabled. This can be used to figure out whether
222  * the link was set up by us or by the boot firmware so we don't
223  * accidentally enable them if they were not enabled during discovery.
224  */
225 static bool tb_disable_clx(struct tb_switch *sw)
226 {
227         bool disabled = false;
228
229         do {
230                 int ret;
231
232                 ret = tb_switch_clx_disable(sw);
233                 if (ret > 0)
234                         disabled = true;
235                 else if (ret < 0)
236                         tb_sw_warn(sw, "failed to disable CL states\n");
237
238                 sw = tb_switch_parent(sw);
239         } while (sw);
240
241         return disabled;
242 }
243
244 static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
245 {
246         struct tb_switch *sw;
247
248         sw = tb_to_switch(dev);
249         if (!sw)
250                 return 0;
251
252         if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
253                 enum tb_switch_tmu_mode mode;
254                 int ret;
255
256                 if (tb_switch_clx_is_enabled(sw, TB_CL1))
257                         mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
258                 else
259                         mode = TB_SWITCH_TMU_MODE_HIFI_BI;
260
261                 ret = tb_switch_tmu_configure(sw, mode);
262                 if (ret)
263                         return ret;
264
265                 return tb_switch_tmu_enable(sw);
266         }
267
268         return 0;
269 }
270
271 static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
272 {
273         struct tb_switch *sw;
274
275         if (!tunnel)
276                 return;
277
278         /*
279          * Once first DP tunnel is established we change the TMU
280          * accuracy of first depth child routers (and the host router)
281          * to the highest. This is needed for the DP tunneling to work
282          * but also allows CL0s.
283          *
284          * If both routers are v2 then we don't need to do anything as
285          * they are using enhanced TMU mode that allows all CLx.
286          */
287         sw = tunnel->tb->root_switch;
288         device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
289 }
290
291 static int tb_enable_tmu(struct tb_switch *sw)
292 {
293         int ret;
294
295         /*
296          * If both routers at the end of the link are v2 we simply
297          * enable the enhanced uni-directional mode. That covers all
298          * the CL states. For v1 and before we need to use the normal
299          * rate to allow CL1 (when supported). Otherwise we keep the TMU
300          * running at the highest accuracy.
301          */
302         ret = tb_switch_tmu_configure(sw,
303                         TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
304         if (ret == -EOPNOTSUPP) {
305                 if (tb_switch_clx_is_enabled(sw, TB_CL1))
306                         ret = tb_switch_tmu_configure(sw,
307                                         TB_SWITCH_TMU_MODE_LOWRES);
308                 else
309                         ret = tb_switch_tmu_configure(sw,
310                                         TB_SWITCH_TMU_MODE_HIFI_BI);
311         }
312         if (ret)
313                 return ret;
314
315         /* If it is already enabled in correct mode, don't touch it */
316         if (tb_switch_tmu_is_enabled(sw))
317                 return 0;
318
319         ret = tb_switch_tmu_disable(sw);
320         if (ret)
321                 return ret;
322
323         ret = tb_switch_tmu_post_time(sw);
324         if (ret)
325                 return ret;
326
327         return tb_switch_tmu_enable(sw);
328 }
329
330 static void tb_switch_discover_tunnels(struct tb_switch *sw,
331                                        struct list_head *list,
332                                        bool alloc_hopids)
333 {
334         struct tb *tb = sw->tb;
335         struct tb_port *port;
336
337         tb_switch_for_each_port(sw, port) {
338                 struct tb_tunnel *tunnel = NULL;
339
340                 switch (port->config.type) {
341                 case TB_TYPE_DP_HDMI_IN:
342                         tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
343                         tb_increase_tmu_accuracy(tunnel);
344                         break;
345
346                 case TB_TYPE_PCIE_DOWN:
347                         tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
348                         break;
349
350                 case TB_TYPE_USB3_DOWN:
351                         tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
352                         break;
353
354                 default:
355                         break;
356                 }
357
358                 if (tunnel)
359                         list_add_tail(&tunnel->list, list);
360         }
361
362         tb_switch_for_each_port(sw, port) {
363                 if (tb_port_has_remote(port)) {
364                         tb_switch_discover_tunnels(port->remote->sw, list,
365                                                    alloc_hopids);
366                 }
367         }
368 }
369
370 static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
371 {
372         if (tb_switch_is_usb4(port->sw))
373                 return usb4_port_configure_xdomain(port, xd);
374         return tb_lc_configure_xdomain(port);
375 }
376
377 static void tb_port_unconfigure_xdomain(struct tb_port *port)
378 {
379         if (tb_switch_is_usb4(port->sw))
380                 usb4_port_unconfigure_xdomain(port);
381         else
382                 tb_lc_unconfigure_xdomain(port);
383 }
384
385 static void tb_scan_xdomain(struct tb_port *port)
386 {
387         struct tb_switch *sw = port->sw;
388         struct tb *tb = sw->tb;
389         struct tb_xdomain *xd;
390         u64 route;
391
392         if (!tb_is_xdomain_enabled())
393                 return;
394
395         route = tb_downstream_route(port);
396         xd = tb_xdomain_find_by_route(tb, route);
397         if (xd) {
398                 tb_xdomain_put(xd);
399                 return;
400         }
401
402         xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
403                               NULL);
404         if (xd) {
405                 tb_port_at(route, sw)->xdomain = xd;
406                 tb_port_configure_xdomain(port, xd);
407                 tb_xdomain_add(xd);
408         }
409 }
410
411 /**
412  * tb_find_unused_port() - return the first inactive port on @sw
413  * @sw: Switch to find the port on
414  * @type: Port type to look for
415  */
416 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
417                                            enum tb_port_type type)
418 {
419         struct tb_port *port;
420
421         tb_switch_for_each_port(sw, port) {
422                 if (tb_is_upstream_port(port))
423                         continue;
424                 if (port->config.type != type)
425                         continue;
426                 if (!port->cap_adap)
427                         continue;
428                 if (tb_port_is_enabled(port))
429                         continue;
430                 return port;
431         }
432         return NULL;
433 }
434
435 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
436                                          const struct tb_port *port)
437 {
438         struct tb_port *down;
439
440         down = usb4_switch_map_usb3_down(sw, port);
441         if (down && !tb_usb3_port_is_enabled(down))
442                 return down;
443         return NULL;
444 }
445
446 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
447                                         struct tb_port *src_port,
448                                         struct tb_port *dst_port)
449 {
450         struct tb_cm *tcm = tb_priv(tb);
451         struct tb_tunnel *tunnel;
452
453         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
454                 if (tunnel->type == type &&
455                     ((src_port && src_port == tunnel->src_port) ||
456                      (dst_port && dst_port == tunnel->dst_port))) {
457                         return tunnel;
458                 }
459         }
460
461         return NULL;
462 }
463
464 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
465                                                    struct tb_port *src_port,
466                                                    struct tb_port *dst_port)
467 {
468         struct tb_port *port, *usb3_down;
469         struct tb_switch *sw;
470
471         /* Pick the router that is deepest in the topology */
472         if (tb_port_path_direction_downstream(src_port, dst_port))
473                 sw = dst_port->sw;
474         else
475                 sw = src_port->sw;
476
477         /* Can't be the host router */
478         if (sw == tb->root_switch)
479                 return NULL;
480
481         /* Find the downstream USB4 port that leads to this router */
482         port = tb_port_at(tb_route(sw), tb->root_switch);
483         /* Find the corresponding host router USB3 downstream port */
484         usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
485         if (!usb3_down)
486                 return NULL;
487
488         return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
489 }
490
491 /**
492  * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
493  * @tb: Domain structure
494  * @src_port: Source protocol adapter
495  * @dst_port: Destination protocol adapter
496  * @port: USB4 port for which the consumed bandwidth is calculated
497  * @consumed_up: Consumed upstream bandwidth (Mb/s)
498  * @consumed_down: Consumed downstream bandwidth (Mb/s)
499  *
500  * Calculates consumed USB3 and PCIe bandwidth at @port on the path
501  * from @src_port to @dst_port. Does not take the USB3 tunnel running from
502  * @src_port to @dst_port itself into account because that bandwidth is
503  * already included as part of the "first hop" USB3 tunnel.
504  */
505 static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
506                                            struct tb_port *src_port,
507                                            struct tb_port *dst_port,
508                                            struct tb_port *port,
509                                            int *consumed_up,
510                                            int *consumed_down)
511 {
512         int pci_consumed_up, pci_consumed_down;
513         struct tb_tunnel *tunnel;
514
515         *consumed_up = *consumed_down = 0;
516
517         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
518         if (tunnel && !tb_port_is_usb3_down(src_port) &&
519             !tb_port_is_usb3_up(dst_port)) {
520                 int ret;
521
522                 ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
523                                                    consumed_down);
524                 if (ret)
525                         return ret;
526         }
527
528         /*
529          * If there is anything reserved for PCIe bulk traffic take it
530          * into account here too.
531          */
532         if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
533                 *consumed_up += pci_consumed_up;
534                 *consumed_down += pci_consumed_down;
535         }
536
537         return 0;
538 }
539
540 /**
541  * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
542  * @tb: Domain structure
543  * @src_port: Source protocol adapter
544  * @dst_port: Destination protocol adapter
545  * @port: USB4 port for which the consumed bandwidth is calculated
546  * @consumed_up: Consumed upstream bandwidth (Mb/s)
547  * @consumed_down: Consumed downstream bandwidth (Mb/s)
548  *
549  * Calculates consumed DP bandwidth at @port on the path from @src_port
550  * to @dst_port. Does not take the tunnel starting from @src_port and
551  * ending at @dst_port into account.
552  *
553  * If there is bandwidth reserved for any of the groups between
554  * @src_port and @dst_port (but not yet used), it is also included in
555  * the returned consumed bandwidth.
556  */
557 static int tb_consumed_dp_bandwidth(struct tb *tb,
558                                     struct tb_port *src_port,
559                                     struct tb_port *dst_port,
560                                     struct tb_port *port,
561                                     int *consumed_up,
562                                     int *consumed_down)
563 {
564         int group_reserved[MAX_GROUPS] = {};
565         struct tb_cm *tcm = tb_priv(tb);
566         struct tb_tunnel *tunnel;
567         bool downstream;
568         int i, ret;
569
570         *consumed_up = *consumed_down = 0;
571
572         /*
573          * Find all DP tunnels that cross the port and reduce
574          * their consumed bandwidth from the available.
575          */
576         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
577                 const struct tb_bandwidth_group *group;
578                 int dp_consumed_up, dp_consumed_down;
579
580                 if (tb_tunnel_is_invalid(tunnel))
581                         continue;
582
583                 if (!tb_tunnel_is_dp(tunnel))
584                         continue;
585
586                 if (!tb_tunnel_port_on_path(tunnel, port))
587                         continue;
588
589                 /*
590                  * Calculate what is reserved for groups crossing the
591                  * same ports only once (as that is reserved for all the
592                  * tunnels in the group).
593                  */
594                 group = tunnel->src_port->group;
595                 if (group && group->reserved && !group_reserved[group->index])
596                         group_reserved[group->index] = group->reserved;
597
598                 /*
599                  * Ignore the DP tunnel between src_port and dst_port
600                  * because it is the same tunnel and we may be
601                  * re-calculating estimated bandwidth.
602                  */
603                 if (tunnel->src_port == src_port &&
604                     tunnel->dst_port == dst_port)
605                         continue;
606
607                 ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
608                                                    &dp_consumed_down);
609                 if (ret)
610                         return ret;
611
612                 *consumed_up += dp_consumed_up;
613                 *consumed_down += dp_consumed_down;
614         }
615
616         downstream = tb_port_path_direction_downstream(src_port, dst_port);
617         for (i = 0; i < ARRAY_SIZE(group_reserved); i++) {
618                 if (downstream)
619                         *consumed_down += group_reserved[i];
620                 else
621                         *consumed_up += group_reserved[i];
622         }
623
624         return 0;
625 }
626
627 static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
628                               struct tb_port *port)
629 {
630         bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
631         enum tb_link_width width;
632
633         if (tb_is_upstream_port(port))
634                 width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
635         else
636                 width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;
637
638         return tb_port_width_supported(port, width);
639 }
640
641 /**
642  * tb_maximum_bandwidth() - Maximum bandwidth over a single link
643  * @tb: Domain structure
644  * @src_port: Source protocol adapter
645  * @dst_port: Destination protocol adapter
646  * @port: USB4 port for which the total bandwidth is calculated
647  * @max_up: Maximum upstream bandwidth (Mb/s)
648  * @max_down: Maximum downstream bandwidth (Mb/s)
649  * @include_asym: Include bandwidth if the link is switched from
650  *                symmetric to asymmetric
651  *
652  * Returns maximum possible bandwidth in @max_up and @max_down over a
653  * single link at @port. If @include_asym is set then also includes the
654  * additional bandwidth available if the links are transitioned into
655  * asymmetric in the direction from @src_port to @dst_port.
656  */
657 static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
658                                 struct tb_port *dst_port, struct tb_port *port,
659                                 int *max_up, int *max_down, bool include_asym)
660 {
661         bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
662         int link_speed, link_width, up_bw, down_bw;
663
664         /*
665          * Asymmetric bandwidth can be included only if it is actually
666          * supported by the lane adapter.
667          */
668         if (!tb_asym_supported(src_port, dst_port, port))
669                 include_asym = false;
670
671         if (tb_is_upstream_port(port)) {
672                 link_speed = port->sw->link_speed;
673                 /*
674                  * sw->link_width is from upstream perspective so we use
675                  * the opposite for downstream of the host router.
676                  */
677                 if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
678                         up_bw = link_speed * 3 * 1000;
679                         down_bw = link_speed * 1 * 1000;
680                 } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
681                         up_bw = link_speed * 1 * 1000;
682                         down_bw = link_speed * 3 * 1000;
683                 } else if (include_asym) {
684                         /*
685                          * The link is symmetric at the moment but we
686                          * can switch it to asymmetric as needed. Report
687                          * this bandwidth as available (even though it
688                          * is not yet enabled).
689                          */
690                         if (downstream) {
691                                 up_bw = link_speed * 1 * 1000;
692                                 down_bw = link_speed * 3 * 1000;
693                         } else {
694                                 up_bw = link_speed * 3 * 1000;
695                                 down_bw = link_speed * 1 * 1000;
696                         }
697                 } else {
698                         up_bw = link_speed * port->sw->link_width * 1000;
699                         down_bw = up_bw;
700                 }
701         } else {
702                 link_speed = tb_port_get_link_speed(port);
703                 if (link_speed < 0)
704                         return link_speed;
705
706                 link_width = tb_port_get_link_width(port);
707                 if (link_width < 0)
708                         return link_width;
709
710                 if (link_width == TB_LINK_WIDTH_ASYM_TX) {
711                         up_bw = link_speed * 1 * 1000;
712                         down_bw = link_speed * 3 * 1000;
713                 } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
714                         up_bw = link_speed * 3 * 1000;
715                         down_bw = link_speed * 1 * 1000;
716                 } else if (include_asym) {
717                         /*
718                          * The link is symmetric at the moment but we
719                          * can switch it to asymmetric as needed. Report
720                          * this bandwidth as available (even though it
721                          * is not yet enabled).
722                          */
723                         if (downstream) {
724                                 up_bw = link_speed * 1 * 1000;
725                                 down_bw = link_speed * 3 * 1000;
726                         } else {
727                                 up_bw = link_speed * 3 * 1000;
728                                 down_bw = link_speed * 1 * 1000;
729                         }
730                 } else {
731                         up_bw = link_speed * link_width * 1000;
732                         down_bw = up_bw;
733                 }
734         }
735
736         /* Leave 10% guard band */
737         *max_up = up_bw - up_bw / 10;
738         *max_down = down_bw - down_bw / 10;
739
740         tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
741         return 0;
742 }
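
/*
 * Worked example for tb_maximum_bandwidth() above, assuming a Gen 4 link
 * (link_speed == 40): a symmetric dual-lane link yields
 * up_bw = down_bw = 40 * 2 * 1000 = 80000 Mb/s, reported as 72000 Mb/s
 * after the 10% guard band. If the link is (or may be transitioned)
 * asymmetric towards @dst_port, the same calculation gives
 * 120000/40000 Mb/s, i.e. 108000/36000 Mb/s after the guard band.
 */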
743
744 /**
745  * tb_available_bandwidth() - Available bandwidth for tunneling
746  * @tb: Domain structure
747  * @src_port: Source protocol adapter
748  * @dst_port: Destination protocol adapter
749  * @available_up: Available bandwidth upstream (Mb/s)
750  * @available_down: Available bandwidth downstream (Mb/s)
751  * @include_asym: Include bandwidth if the link is switched from
752  *                symmetric to asymmetric
753  *
754  * Calculates maximum available bandwidth for protocol tunneling between
755  * @src_port and @dst_port at the moment. This is the minimum of the
756  * maximum link bandwidth across all links, each reduced by the bandwidth
757  * currently consumed on that link.
758  *
759  * If @include_asym is true then also includes the bandwidth that can be
760  * added when the links are transitioned into asymmetric (but does not
761  * transition the links).
762  */
763 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
764                                  struct tb_port *dst_port, int *available_up,
765                                  int *available_down, bool include_asym)
766 {
767         struct tb_port *port;
768         int ret;
769
770         /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
771         *available_up = *available_down = 120000;
772
773         /* Find the minimum available bandwidth over all links */
774         tb_for_each_port_on_path(src_port, dst_port, port) {
775                 int max_up, max_down, consumed_up, consumed_down;
776
777                 if (!tb_port_is_null(port))
778                         continue;
779
780                 ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
781                                            &max_up, &max_down, include_asym);
782                 if (ret)
783                         return ret;
784
785                 ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
786                                                       port, &consumed_up,
787                                                       &consumed_down);
788                 if (ret)
789                         return ret;
790                 max_up -= consumed_up;
791                 max_down -= consumed_down;
792
793                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
794                                                &consumed_up, &consumed_down);
795                 if (ret)
796                         return ret;
797                 max_up -= consumed_up;
798                 max_down -= consumed_down;
799
800                 if (max_up < *available_up)
801                         *available_up = max_up;
802                 if (max_down < *available_down)
803                         *available_down = max_down;
804         }
805
806         if (*available_up < 0)
807                 *available_up = 0;
808         if (*available_down < 0)
809                 *available_down = 0;
810
811         return 0;
812 }
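
/*
 * Minimal usage sketch (illustrative only, mirroring how tb_tunnel_usb3()
 * below calls this helper for a prospective tunnel between the down and
 * up USB3 adapters):
 *
 *	int available_up, available_down;
 *
 *	if (!tb_available_bandwidth(tb, down, up, &available_up,
 *				    &available_down, false))
 *		tb_port_dbg(up, "%d/%d Mb/s available\n",
 *			    available_up, available_down);
 */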
813
814 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
815                                             struct tb_port *src_port,
816                                             struct tb_port *dst_port)
817 {
818         struct tb_tunnel *tunnel;
819
820         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
821         return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
822 }
823
824 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
825                                       struct tb_port *dst_port)
826 {
827         int ret, available_up, available_down;
828         struct tb_tunnel *tunnel;
829
830         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
831         if (!tunnel)
832                 return;
833
834         tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n");
835
836         /*
837          * Calculate available bandwidth for the first hop USB3 tunnel.
838          * That determines the whole USB3 bandwidth for this branch.
839          */
840         ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
841                                      &available_up, &available_down, false);
842         if (ret) {
843                 tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n");
844                 return;
845         }
846
847         tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up,
848                       available_down);
849
850         tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
851 }
852
853 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
854 {
855         struct tb_switch *parent = tb_switch_parent(sw);
856         int ret, available_up, available_down;
857         struct tb_port *up, *down, *port;
858         struct tb_cm *tcm = tb_priv(tb);
859         struct tb_tunnel *tunnel;
860
861         if (!tb_acpi_may_tunnel_usb3()) {
862                 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
863                 return 0;
864         }
865
866         up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
867         if (!up)
868                 return 0;
869
870         if (!sw->link_usb4)
871                 return 0;
872
873         /*
874          * Look up an available down port. Since we are chaining, it should
875          * be found right above this switch.
876          */
877         port = tb_switch_downstream_port(sw);
878         down = tb_find_usb3_down(parent, port);
879         if (!down)
880                 return 0;
881
882         if (tb_route(parent)) {
883                 struct tb_port *parent_up;
884                 /*
885                  * Check first that the parent switch has its upstream USB3
886                  * port enabled. Otherwise the chain is not complete and
887                  * there is no point setting up a new tunnel.
888                  */
889                 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
890                 if (!parent_up || !tb_port_is_enabled(parent_up))
891                         return 0;
892
893                 /* Make all unused bandwidth available for the new tunnel */
894                 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
895                 if (ret)
896                         return ret;
897         }
898
899         ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
900                                      false);
901         if (ret)
902                 goto err_reclaim;
903
904         tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
905                     available_up, available_down);
906
907         tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
908                                       available_down);
909         if (!tunnel) {
910                 ret = -ENOMEM;
911                 goto err_reclaim;
912         }
913
914         if (tb_tunnel_activate(tunnel)) {
915                 tb_port_info(up,
916                              "USB3 tunnel activation failed, aborting\n");
917                 ret = -EIO;
918                 goto err_free;
919         }
920
921         list_add_tail(&tunnel->list, &tcm->tunnel_list);
922         if (tb_route(parent))
923                 tb_reclaim_usb3_bandwidth(tb, down, up);
924
925         return 0;
926
927 err_free:
928         tb_tunnel_free(tunnel);
929 err_reclaim:
930         if (tb_route(parent))
931                 tb_reclaim_usb3_bandwidth(tb, down, up);
932
933         return ret;
934 }
935
936 static int tb_create_usb3_tunnels(struct tb_switch *sw)
937 {
938         struct tb_port *port;
939         int ret;
940
941         if (!tb_acpi_may_tunnel_usb3())
942                 return 0;
943
944         if (tb_route(sw)) {
945                 ret = tb_tunnel_usb3(sw->tb, sw);
946                 if (ret)
947                         return ret;
948         }
949
950         tb_switch_for_each_port(sw, port) {
951                 if (!tb_port_has_remote(port))
952                         continue;
953                 ret = tb_create_usb3_tunnels(port->remote->sw);
954                 if (ret)
955                         return ret;
956         }
957
958         return 0;
959 }
960
961 /**
962  * tb_configure_asym() - Transition links to asymmetric if needed
963  * @tb: Domain structure
964  * @src_port: Source adapter to start the transition
965  * @dst_port: Destination adapter
966  * @requested_up: Additional bandwidth (Mb/s) required upstream
967  * @requested_down: Additional bandwidth (Mb/s) required downstream
968  *
969  * Transition links between @src_port and @dst_port into asymmetric, with
970  * three lanes in the direction from @src_port towards @dst_port and one lane
971  * in the opposite direction, if the bandwidth requirements
972  * (requested + currently consumed) on that link exceed @asym_threshold.
973  *
974  * Must be called with available >= requested over all links.
975  */
976 static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
977                              struct tb_port *dst_port, int requested_up,
978                              int requested_down)
979 {
980         bool clx = false, clx_disabled = false, downstream;
981         struct tb_switch *sw;
982         struct tb_port *up;
983         int ret = 0;
984
985         if (!asym_threshold)
986                 return 0;
987
988         downstream = tb_port_path_direction_downstream(src_port, dst_port);
989         /* Pick up router deepest in the hierarchy */
990         if (downstream)
991                 sw = dst_port->sw;
992         else
993                 sw = src_port->sw;
994
995         tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
996                 struct tb_port *down = tb_switch_downstream_port(up->sw);
997                 enum tb_link_width width_up, width_down;
998                 int consumed_up, consumed_down;
999
1000                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1001                                                &consumed_up, &consumed_down);
1002                 if (ret)
1003                         break;
1004
1005                 if (downstream) {
1006                         /*
1007                          * Downstream, so make sure upstream stays within 36G
1008                          * (40G minus the 10% guard band), and that the
1009                          * requested bandwidth exceeds the threshold.
1010                          */
1011                         if (consumed_up + requested_up >= TB_ASYM_MIN) {
1012                                 ret = -ENOBUFS;
1013                                 break;
1014                         }
1015                         /* Does consumed + requested exceed the threshold */
1016                         if (consumed_down + requested_down < asym_threshold)
1017                                 continue;
1018
1019                         width_up = TB_LINK_WIDTH_ASYM_RX;
1020                         width_down = TB_LINK_WIDTH_ASYM_TX;
1021                 } else {
1022                         /* Upstream, the opposite of above */
1023                         if (consumed_down + requested_down >= TB_ASYM_MIN) {
1024                                 ret = -ENOBUFS;
1025                                 break;
1026                         }
1027                         if (consumed_up + requested_up < asym_threshold)
1028                                 continue;
1029
1030                         width_up = TB_LINK_WIDTH_ASYM_TX;
1031                         width_down = TB_LINK_WIDTH_ASYM_RX;
1032                 }
1033
1034                 if (up->sw->link_width == width_up)
1035                         continue;
1036
1037                 if (!tb_port_width_supported(up, width_up) ||
1038                     !tb_port_width_supported(down, width_down))
1039                         continue;
1040
1041                 /*
1042                  * Disable CL states before doing any transitions. We
1043                  * delayed it until now, when we know there is a real
1044                  * transition taking place.
1045                  */
1046                 if (!clx_disabled) {
1047                         clx = tb_disable_clx(sw);
1048                         clx_disabled = true;
1049                 }
1050
1051                 tb_sw_dbg(up->sw, "configuring asymmetric link\n");
1052
1053                 /*
1054                  * Here requested + consumed > threshold so we need to
1055                  * transition the link into asymmetric now.
1056                  */
1057                 ret = tb_switch_set_link_width(up->sw, width_up);
1058                 if (ret) {
1059                         tb_sw_warn(up->sw, "failed to set link width\n");
1060                         break;
1061                 }
1062         }
1063
1064         /* Re-enable CL states if they were previously enabled */
1065         if (clx)
1066                 tb_enable_clx(sw);
1067
1068         return ret;
1069 }
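
/*
 * Numeric example with the default asym_threshold of 45000 Mb/s: if DP
 * traffic towards @dst_port already consumes 30000 Mb/s downstream and
 * the new request adds 17000 Mb/s, then 30000 + 17000 = 47000 >= 45000
 * and the link is transitioned to asymmetric, provided the upstream
 * direction (consumed + requested) stays below TB_ASYM_MIN (36000 Mb/s)
 * and both lane adapters support the asymmetric widths. The figures are
 * illustrative only.
 */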
1070
1071 /**
1072  * tb_configure_sym() - Transition links to symmetric if possible
1073  * @tb: Domain structure
1074  * @src_port: Source adapter to start the transition
1075  * @dst_port: Destination adapter
1076  * @keep_asym: Keep asymmetric link if preferred
1077  *
1078  * Goes over each link from @src_port to @dst_port and tries to
1079  * transition the link to symmetric if the currently consumed bandwidth allows
1080  * it. A router's asymmetric link preference is honored unless @keep_asym is %false.
1081  */
1082 static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
1083                             struct tb_port *dst_port, bool keep_asym)
1084 {
1085         bool clx = false, clx_disabled = false, downstream;
1086         struct tb_switch *sw;
1087         struct tb_port *up;
1088         int ret = 0;
1089
1090         if (!asym_threshold)
1091                 return 0;
1092
1093         downstream = tb_port_path_direction_downstream(src_port, dst_port);
1094         /* Pick up router deepest in the hierarchy */
1095         if (downstream)
1096                 sw = dst_port->sw;
1097         else
1098                 sw = src_port->sw;
1099
1100         tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
1101                 int consumed_up, consumed_down;
1102
1103                 /* Already symmetric */
1104                 if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
1105                         continue;
1106                 /* Unplugged, no need to switch */
1107                 if (up->sw->is_unplugged)
1108                         continue;
1109
1110                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1111                                                &consumed_up, &consumed_down);
1112                 if (ret)
1113                         break;
1114
1115                 if (downstream) {
1116                         /*
1117                          * Downstream, so we want consumed_down < threshold.
1118                          * Upstream traffic should be less than 36G (40G minus
1119                          * the 10% guard band) as the link was already configured
1120                          * asymmetric.
1121                          */
1122                         if (consumed_down >= asym_threshold)
1123                                 continue;
1124                 } else {
1125                         if (consumed_up >= asym_threshold)
1126                                 continue;
1127                 }
1128
1129                 if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
1130                         continue;
1131
1132                 /*
1133                  * Here consumed < threshold so we can transition the
1134                  * link to symmetric.
1135                  *
1136                  * However, if the router prefers asymmetric link we
1137                  * honor that (unless @keep_asym is %false).
1138                  */
1139                 if (keep_asym &&
1140                     up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) {
1141                         tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n");
1142                         continue;
1143                 }
1144
1145                 /* Disable CL states before doing any transitions */
1146                 if (!clx_disabled) {
1147                         clx = tb_disable_clx(sw);
1148                         clx_disabled = true;
1149                 }
1150
1151                 tb_sw_dbg(up->sw, "configuring symmetric link\n");
1152
1153                 ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
1154                 if (ret) {
1155                         tb_sw_warn(up->sw, "failed to set link width\n");
1156                         break;
1157                 }
1158         }
1159
1160         /* Re-enable CL states if they were previously enabled */
1161         if (clx)
1162                 tb_enable_clx(sw);
1163
1164         return ret;
1165 }
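
/*
 * Continuing the example above: once the consumed downstream bandwidth
 * on a link drops back below asym_threshold (for example when a DP
 * tunnel is torn down), tb_configure_sym() returns the link to
 * TB_LINK_WIDTH_DUAL, unless @keep_asym is set and the router prefers
 * an asymmetric link.
 */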
1166
1167 static void tb_configure_link(struct tb_port *down, struct tb_port *up,
1168                               struct tb_switch *sw)
1169 {
1170         struct tb *tb = sw->tb;
1171
1172         /* Link the routers using both links if available */
1173         down->remote = up;
1174         up->remote = down;
1175         if (down->dual_link_port && up->dual_link_port) {
1176                 down->dual_link_port->remote = up->dual_link_port;
1177                 up->dual_link_port->remote = down->dual_link_port;
1178         }
1179
1180         /*
1181          * Enable lane bonding if the link is currently two single lane
1182          * links.
1183          */
1184         if (sw->link_width < TB_LINK_WIDTH_DUAL)
1185                 tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
1186
1187         /*
1188          * If a device router that comes up with a symmetric link is
1189          * connected deeper in the hierarchy, transition the links
1190          * above it into symmetric if bandwidth allows.
1191          */
1192         if (tb_switch_depth(sw) > 1 &&
1193             tb_port_get_link_generation(up) >= 4 &&
1194             up->sw->link_width == TB_LINK_WIDTH_DUAL) {
1195                 struct tb_port *host_port;
1196
1197                 host_port = tb_port_at(tb_route(sw), tb->root_switch);
1198                 tb_configure_sym(tb, host_port, up, false);
1199         }
1200
1201         /* Set the link configured */
1202         tb_switch_configure_link(sw);
1203 }
1204
1205 static void tb_scan_port(struct tb_port *port);
1206
1207 /*
1208  * tb_scan_switch() - scan for and initialize downstream switches
1209  */
1210 static void tb_scan_switch(struct tb_switch *sw)
1211 {
1212         struct tb_port *port;
1213
1214         pm_runtime_get_sync(&sw->dev);
1215
1216         tb_switch_for_each_port(sw, port)
1217                 tb_scan_port(port);
1218
1219         pm_runtime_mark_last_busy(&sw->dev);
1220         pm_runtime_put_autosuspend(&sw->dev);
1221 }
1222
1223 /*
1224  * tb_scan_port() - check for and initialize switches below port
1225  */
1226 static void tb_scan_port(struct tb_port *port)
1227 {
1228         struct tb_cm *tcm = tb_priv(port->sw->tb);
1229         struct tb_port *upstream_port;
1230         bool discovery = false;
1231         struct tb_switch *sw;
1232
1233         if (tb_is_upstream_port(port))
1234                 return;
1235
1236         if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
1237             !tb_dp_port_is_enabled(port)) {
1238                 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
1239                 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
1240                                  false);
1241                 return;
1242         }
1243
1244         if (port->config.type != TB_TYPE_PORT)
1245                 return;
1246         if (port->dual_link_port && port->link_nr)
1247                 return; /*
1248                          * Downstream switch is reachable through two ports.
1249                          * Only scan on the primary port (link_nr == 0).
1250                          */
1251
1252         if (port->usb4)
1253                 pm_runtime_get_sync(&port->usb4->dev);
1254
1255         if (tb_wait_for_port(port, false) <= 0)
1256                 goto out_rpm_put;
1257         if (port->remote) {
1258                 tb_port_dbg(port, "port already has a remote\n");
1259                 goto out_rpm_put;
1260         }
1261
1262         tb_retimer_scan(port, true);
1263
1264         sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
1265                              tb_downstream_route(port));
1266         if (IS_ERR(sw)) {
1267                 /*
1268                  * If there is an error accessing the connected switch
1269                  * it may be connected to another domain. Also we allow
1270                  * the other domain to be connected to a max depth switch.
1271                  */
1272                 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
1273                         tb_scan_xdomain(port);
1274                 goto out_rpm_put;
1275         }
1276
1277         if (tb_switch_configure(sw)) {
1278                 tb_switch_put(sw);
1279                 goto out_rpm_put;
1280         }
1281
1282         /*
1283          * If there was previously another domain connected remove it
1284          * first.
1285          */
1286         if (port->xdomain) {
1287                 tb_xdomain_remove(port->xdomain);
1288                 tb_port_unconfigure_xdomain(port);
1289                 port->xdomain = NULL;
1290         }
1291
1292         /*
1293          * Do not send uevents until we have discovered all existing
1294          * tunnels and know which switches were authorized already by
1295          * the boot firmware.
1296          */
1297         if (!tcm->hotplug_active) {
1298                 dev_set_uevent_suppress(&sw->dev, true);
1299                 discovery = true;
1300         }
1301
1302         /*
1303          * At the moment we can support runtime PM only on Thunderbolt 2
1304          * and beyond (devices with LC).
1305          */
1306         sw->rpm = sw->generation > 1;
1307
1308         if (tb_switch_add(sw)) {
1309                 tb_switch_put(sw);
1310                 goto out_rpm_put;
1311         }
1312
1313         upstream_port = tb_upstream_port(sw);
1314         tb_configure_link(port, upstream_port, sw);
1315
1316         /*
1317          * CL0s and CL1 are enabled and supported together.
1318          * Silently ignore CLx enabling in case CLx is not supported.
1319          */
1320         if (discovery)
1321                 tb_sw_dbg(sw, "discovery, not touching CL states\n");
1322         else if (tb_enable_clx(sw))
1323                 tb_sw_warn(sw, "failed to enable CL states\n");
1324
1325         if (tb_enable_tmu(sw))
1326                 tb_sw_warn(sw, "failed to enable TMU\n");
1327
1328         /*
1329          * Configuration valid needs to be set after the TMU has been
1330          * enabled for the upstream port of the router so we do it here.
1331          */
1332         tb_switch_configuration_valid(sw);
1333
1334         /* Scan upstream retimers */
1335         tb_retimer_scan(upstream_port, true);
1336
1337         /*
1338          * Create USB 3.x tunnels only when the switch is plugged into the
1339          * domain. This is because we also scan the domain during discovery
1340          * and want to discover existing USB 3.x tunnels before we create
1341          * any new ones.
1342          */
1343         if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
1344                 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
1345
1346         tb_add_dp_resources(sw);
1347         tb_scan_switch(sw);
1348
1349 out_rpm_put:
1350         if (port->usb4) {
1351                 pm_runtime_mark_last_busy(&port->usb4->dev);
1352                 pm_runtime_put_autosuspend(&port->usb4->dev);
1353         }
1354 }
1355
1356 static void
1357 tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
1358 {
1359         struct tb_tunnel *first_tunnel;
1360         struct tb *tb = group->tb;
1361         struct tb_port *in;
1362         int ret;
1363
1364         tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
1365                group->index);
1366
1367         first_tunnel = NULL;
1368         list_for_each_entry(in, &group->ports, group_list) {
1369                 int estimated_bw, estimated_up, estimated_down;
1370                 struct tb_tunnel *tunnel;
1371                 struct tb_port *out;
1372
1373                 if (!usb4_dp_port_bandwidth_mode_enabled(in))
1374                         continue;
1375
1376                 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1377                 if (WARN_ON(!tunnel))
1378                         break;
1379
1380                 if (!first_tunnel) {
1381                         /*
1382                          * Since USB3 bandwidth is shared by all DP
1383                          * tunnels under the host router USB4 port, even
1384                          * if they do not begin from the host router, we
1385                          * can release USB3 bandwidth just once and not
1386                          * for each tunnel separately.
1387                          */
1388                         first_tunnel = tunnel;
1389                         ret = tb_release_unused_usb3_bandwidth(tb,
1390                                 first_tunnel->src_port, first_tunnel->dst_port);
1391                         if (ret) {
1392                                 tb_tunnel_warn(tunnel,
1393                                         "failed to release unused bandwidth\n");
1394                                 break;
1395                         }
1396                 }
1397
1398                 out = tunnel->dst_port;
1399                 ret = tb_available_bandwidth(tb, in, out, &estimated_up,
1400                                              &estimated_down, true);
1401                 if (ret) {
1402                         tb_tunnel_warn(tunnel,
1403                                 "failed to re-calculate estimated bandwidth\n");
1404                         break;
1405                 }
1406
1407                 /*
1408                  * Estimated bandwidth includes:
1409                  *  - already allocated bandwidth for the DP tunnel
1410                  *  - available bandwidth along the path
1411                  *  - bandwidth allocated for USB 3.x but not used.
1412                  */
1413                 if (tb_tunnel_direction_downstream(tunnel))
1414                         estimated_bw = estimated_down;
1415                 else
1416                         estimated_bw = estimated_up;
1417
1418                 /*
1419                  * If there is reserved bandwidth for the group that is
1420                  * not yet released we report that too.
1421                  */
1422                 tb_tunnel_dbg(tunnel,
1423                               "re-calculated estimated bandwidth %u (+ %u reserved) = %u Mb/s\n",
1424                               estimated_bw, group->reserved,
1425                               estimated_bw + group->reserved);
1426
1427                 if (usb4_dp_port_set_estimated_bandwidth(in,
1428                                 estimated_bw + group->reserved))
1429                         tb_tunnel_warn(tunnel,
1430                                        "failed to update estimated bandwidth\n");
1431         }
1432
1433         if (first_tunnel)
1434                 tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
1435                                           first_tunnel->dst_port);
1436
1437         tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
1438 }
1439
1440 static void tb_recalc_estimated_bandwidth(struct tb *tb)
1441 {
1442         struct tb_cm *tcm = tb_priv(tb);
1443         int i;
1444
1445         tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
1446
1447         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1448                 struct tb_bandwidth_group *group = &tcm->groups[i];
1449
1450                 if (!list_empty(&group->ports))
1451                         tb_recalc_estimated_bandwidth_for_group(group);
1452         }
1453
1454         tb_dbg(tb, "bandwidth re-calculation done\n");
1455 }
1456
1457 static bool __release_group_bandwidth(struct tb_bandwidth_group *group)
1458 {
1459         if (group->reserved) {
1460                 tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index,
1461                         group->reserved);
1462                 group->reserved = 0;
1463                 return true;
1464         }
1465         return false;
1466 }
1467
1468 static void __configure_group_sym(struct tb_bandwidth_group *group)
1469 {
1470         struct tb_tunnel *tunnel;
1471         struct tb_port *in;
1472
1473         if (list_empty(&group->ports))
1474                 return;
1475
1476         /*
1477          * All the tunnels in the group go through the same USB4 links
1478          * so we find the first one here and pass the IN and OUT
1479          * adapters to tb_configure_sym() which then transitions the
1480          * links back to symmetric if the bandwidth requirement is < asym_threshold.
1481          *
1482          * We do this here to avoid unnecessary transitions (for example
1483          * if the graphics released bandwidth for another tunnel in the
1484          * same group).
1485          */
1486         in = list_first_entry(&group->ports, struct tb_port, group_list);
1487         tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL);
1488         if (tunnel)
1489                 tb_configure_sym(group->tb, in, tunnel->dst_port, true);
1490 }
1491
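/*
 * Delayed work that hands the bandwidth reserved for a group back to
 * others, TB_RELEASE_BW_TIMEOUT after the last reservation was made.
 */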
1492 static void tb_bandwidth_group_release_work(struct work_struct *work)
1493 {
1494         struct tb_bandwidth_group *group =
1495                 container_of(work, typeof(*group), release_work.work);
1496         struct tb *tb = group->tb;
1497
1498         mutex_lock(&tb->lock);
1499         if (__release_group_bandwidth(group))
1500                 tb_recalc_estimated_bandwidth(tb);
1501         __configure_group_sym(group);
1502         mutex_unlock(&tb->lock);
1503 }
1504
1505 static void tb_init_bandwidth_groups(struct tb_cm *tcm)
1506 {
1507         int i;
1508
1509         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1510                 struct tb_bandwidth_group *group = &tcm->groups[i];
1511
1512                 group->tb = tcm_to_tb(tcm);
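                /* Group_ID is 1-based so indexes run from 1 to MAX_GROUPS */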
1513                 group->index = i + 1;
1514                 INIT_LIST_HEAD(&group->ports);
1515                 INIT_DELAYED_WORK(&group->release_work,
1516                                   tb_bandwidth_group_release_work);
1517         }
1518 }
1519
1520 static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
1521                                            struct tb_port *in)
1522 {
1523         if (!group || WARN_ON(in->group))
1524                 return;
1525
1526         in->group = group;
1527         list_add_tail(&in->group_list, &group->ports);
1528
1529         tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
1530 }
1531
1532 static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
1533 {
1534         int i;
1535
1536         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1537                 struct tb_bandwidth_group *group = &tcm->groups[i];
1538
1539                 if (list_empty(&group->ports))
1540                         return group;
1541         }
1542
1543         return NULL;
1544 }
1545
1546 static struct tb_bandwidth_group *
1547 tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
1548                           struct tb_port *out)
1549 {
1550         struct tb_bandwidth_group *group;
1551         struct tb_tunnel *tunnel;
1552
1553         /*
1554          * Find all DP tunnels that go through all the same USB4 links
1555          * as this one. Because we always set up tunnels the same way, we
1556          * can just check the routers at both ends of the tunnels and if
1557          * they are the same we have a match.
1558          */
1559         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1560                 if (!tb_tunnel_is_dp(tunnel))
1561                         continue;
1562
1563                 if (tunnel->src_port->sw == in->sw &&
1564                     tunnel->dst_port->sw == out->sw) {
1565                         group = tunnel->src_port->group;
1566                         if (group) {
1567                                 tb_bandwidth_group_attach_port(group, in);
1568                                 return group;
1569                         }
1570                 }
1571         }
1572
1573         /* Otherwise pick the next available group */
1574         group = tb_find_free_bandwidth_group(tcm);
1575         if (group)
1576                 tb_bandwidth_group_attach_port(group, in);
1577         else
1578                 tb_port_warn(in, "no available bandwidth groups\n");
1579
1580         return group;
1581 }
1582
1583 static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
1584                                         struct tb_port *out)
1585 {
1586         if (usb4_dp_port_bandwidth_mode_enabled(in)) {
1587                 int index, i;
1588
1589                 index = usb4_dp_port_group_id(in);
1590                 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1591                         if (tcm->groups[i].index == index) {
1592                                 tb_bandwidth_group_attach_port(&tcm->groups[i], in);
1593                                 return;
1594                         }
1595                 }
1596         }
1597
1598         tb_attach_bandwidth_group(tcm, in, out);
1599 }
1600
1601 static void tb_detach_bandwidth_group(struct tb_port *in)
1602 {
1603         struct tb_bandwidth_group *group = in->group;
1604
1605         if (group) {
1606                 in->group = NULL;
1607                 list_del_init(&in->group_list);
1608
1609                 tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
1610
1611                 /* No more tunnels so release the reserved bandwidth if any */
1612                 if (list_empty(&group->ports)) {
1613                         cancel_delayed_work(&group->release_work);
1614                         __release_group_bandwidth(group);
1615                 }
1616         }
1617 }
1618
1619 static void tb_discover_tunnels(struct tb *tb)
1620 {
1621         struct tb_cm *tcm = tb_priv(tb);
1622         struct tb_tunnel *tunnel;
1623
1624         tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
1625
1626         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1627                 if (tb_tunnel_is_pci(tunnel)) {
1628                         struct tb_switch *parent = tunnel->dst_port->sw;
1629
1630                         while (parent != tunnel->src_port->sw) {
1631                                 parent->boot = true;
1632                                 parent = tb_switch_parent(parent);
1633                         }
1634                 } else if (tb_tunnel_is_dp(tunnel)) {
1635                         struct tb_port *in = tunnel->src_port;
1636                         struct tb_port *out = tunnel->dst_port;
1637
1638                         /* Keep the domain from powering down */
1639                         pm_runtime_get_sync(&in->sw->dev);
1640                         pm_runtime_get_sync(&out->sw->dev);
1641
1642                         tb_discover_bandwidth_group(tcm, in, out);
1643                 }
1644         }
1645 }
1646
1647 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
1648 {
1649         struct tb_port *src_port, *dst_port;
1650         struct tb *tb;
1651
1652         if (!tunnel)
1653                 return;
1654
1655         tb_tunnel_deactivate(tunnel);
1656         list_del(&tunnel->list);
1657
1658         tb = tunnel->tb;
1659         src_port = tunnel->src_port;
1660         dst_port = tunnel->dst_port;
1661
1662         switch (tunnel->type) {
1663         case TB_TUNNEL_DP:
1664                 tb_detach_bandwidth_group(src_port);
1665                 /*
1666                  * In case of DP tunnel make sure the DP IN resource is
1667                  * deallocated properly.
1668                  */
1669                 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
1670                 /*
1671                  * If bandwidth on a link is < asym_threshold
1672                  * transition the link to symmetric.
1673                  */
1674                 tb_configure_sym(tb, src_port, dst_port, true);
1675                 /* Now we can allow the domain to runtime suspend again */
1676                 pm_runtime_mark_last_busy(&dst_port->sw->dev);
1677                 pm_runtime_put_autosuspend(&dst_port->sw->dev);
1678                 pm_runtime_mark_last_busy(&src_port->sw->dev);
1679                 pm_runtime_put_autosuspend(&src_port->sw->dev);
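                /*
                 * Fall through so the bandwidth freed up by the DP
                 * tunnel can be reclaimed for USB3 below.
                 */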
1680                 fallthrough;
1681
1682         case TB_TUNNEL_USB3:
1683                 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
1684                 break;
1685
1686         default:
1687                 /*
1688                  * PCIe and DMA tunnels do not consume guaranteed
1689                  * bandwidth.
1690                  */
1691                 break;
1692         }
1693
1694         tb_tunnel_free(tunnel);
1695 }
1696
1697 /*
1698  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
1699  */
1700 static void tb_free_invalid_tunnels(struct tb *tb)
1701 {
1702         struct tb_cm *tcm = tb_priv(tb);
1703         struct tb_tunnel *tunnel;
1704         struct tb_tunnel *n;
1705
1706         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1707                 if (tb_tunnel_is_invalid(tunnel))
1708                         tb_deactivate_and_free_tunnel(tunnel);
1709         }
1710 }
1711
1712 /*
1713  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
1714  */
1715 static void tb_free_unplugged_children(struct tb_switch *sw)
1716 {
1717         struct tb_port *port;
1718
1719         tb_switch_for_each_port(sw, port) {
1720                 if (!tb_port_has_remote(port))
1721                         continue;
1722
1723                 if (port->remote->sw->is_unplugged) {
1724                         tb_retimer_remove_all(port);
1725                         tb_remove_dp_resources(port->remote->sw);
1726                         tb_switch_unconfigure_link(port->remote->sw);
1727                         tb_switch_set_link_width(port->remote->sw,
1728                                                  TB_LINK_WIDTH_SINGLE);
1729                         tb_switch_remove(port->remote->sw);
1730                         port->remote = NULL;
1731                         if (port->dual_link_port)
1732                                 port->dual_link_port->remote = NULL;
1733                 } else {
1734                         tb_free_unplugged_children(port->remote->sw);
1735                 }
1736         }
1737 }
1738
1739 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
1740                                          const struct tb_port *port)
1741 {
1742         struct tb_port *down = NULL;
1743
1744         /*
1745          * To keep plugging devices consistently in the same PCIe
1746          * hierarchy, do mapping here for switch downstream PCIe ports.
1747          */
1748         if (tb_switch_is_usb4(sw)) {
1749                 down = usb4_switch_map_pcie_down(sw, port);
1750         } else if (!tb_route(sw)) {
1751                 int phy_port = tb_phy_port_from_link(port->port);
1752                 int index;
1753
1754                 /*
1755                  * Hard-coded Thunderbolt port to PCIe down port mapping
1756                  * per controller.
1757                  */
1758                 if (tb_switch_is_cactus_ridge(sw) ||
1759                     tb_switch_is_alpine_ridge(sw))
1760                         index = !phy_port ? 6 : 7;
1761                 else if (tb_switch_is_falcon_ridge(sw))
1762                         index = !phy_port ? 6 : 8;
1763                 else if (tb_switch_is_titan_ridge(sw))
1764                         index = !phy_port ? 8 : 9;
1765                 else
1766                         goto out;
1767
1768                 /* Validate the hard-coding */
1769                 if (WARN_ON(index > sw->config.max_port_number))
1770                         goto out;
1771
1772                 down = &sw->ports[index];
1773         }
1774
1775         if (down) {
1776                 if (WARN_ON(!tb_port_is_pcie_down(down)))
1777                         goto out;
1778                 if (tb_pci_port_is_enabled(down))
1779                         goto out;
1780
1781                 return down;
1782         }
1783
1784 out:
1785         return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
1786 }
1787
1788 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1789 {
1790         struct tb_port *host_port, *port;
1791         struct tb_cm *tcm = tb_priv(tb);
1792
1793         host_port = tb_route(in->sw) ?
1794                 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1795
1796         list_for_each_entry(port, &tcm->dp_resources, list) {
1797                 if (!tb_port_is_dpout(port))
1798                         continue;
1799
1800                 if (tb_port_is_enabled(port)) {
1801                         tb_port_dbg(port, "DP OUT in use\n");
1802                         continue;
1803                 }
1804
1805                 /* The DP IN and DP OUT need to be on different routers */
1806                 if (in->sw == port->sw) {
1807                         tb_port_dbg(port, "skipping DP OUT on same router\n");
1808                         continue;
1809                 }
1810
1811                 tb_port_dbg(port, "DP OUT available\n");
1812
1813                 /*
1814                  * Keep the DP tunnel under the topology starting from
1815                  * the same host router downstream port.
1816                  */
1817                 if (host_port && tb_route(port->sw)) {
1818                         struct tb_port *p;
1819
1820                         p = tb_port_at(tb_route(port->sw), tb->root_switch);
1821                         if (p != host_port)
1822                                 continue;
1823                 }
1824
1825                 return port;
1826         }
1827
1828         return NULL;
1829 }
1830
1831 static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
1832                              struct tb_port *out)
1833 {
1834         int available_up, available_down, ret, link_nr;
1835         struct tb_cm *tcm = tb_priv(tb);
1836         int consumed_up, consumed_down;
1837         struct tb_tunnel *tunnel;
1838
1839         /*
1840          * This is only applicable to links that are not bonded (so
1841          * when Thunderbolt 1 hardware is involved somewhere in the
1842          * topology). For these try to share the DP bandwidth between
1843          * the two lanes.
1844          */
1845         link_nr = 1;
1846         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1847                 if (tb_tunnel_is_dp(tunnel)) {
1848                         link_nr = 0;
1849                         break;
1850                 }
1851         }
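        /*
         * The first DP tunnel keeps link_nr 1; once a DP tunnel already
         * exists any new one uses lane 0 instead, so the streams are
         * shared between the two lanes.
         */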
1852
1853         /*
1854          * DP stream needs the domain to be active so runtime resume
1855          * both ends of the tunnel.
1856          *
1857          * This should bring the routers in the middle active as well
1858          * and keeps the domain from runtime suspending while the DP
1859          * tunnel is active.
1860          */
1861         pm_runtime_get_sync(&in->sw->dev);
1862         pm_runtime_get_sync(&out->sw->dev);
1863
1864         if (tb_switch_alloc_dp_resource(in->sw, in)) {
1865                 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
1866                 goto err_rpm_put;
1867         }
1868
1869         if (!tb_attach_bandwidth_group(tcm, in, out))
1870                 goto err_dealloc_dp;
1871
1872         /* Make all unused USB3 bandwidth available for the new DP tunnel */
1873         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1874         if (ret) {
1875                 tb_warn(tb, "failed to release unused bandwidth\n");
1876                 goto err_detach_group;
1877         }
1878
1879         ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
1880                                      true);
1881         if (ret)
1882                 goto err_reclaim_usb;
1883
1884         tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
1885                available_up, available_down);
1886
1887         tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
1888                                     available_down);
1889         if (!tunnel) {
1890                 tb_port_dbg(out, "could not allocate DP tunnel\n");
1891                 goto err_reclaim_usb;
1892         }
1893
1894         if (tb_tunnel_activate(tunnel)) {
1895                 tb_port_info(out, "DP tunnel activation failed, aborting\n");
1896                 goto err_free;
1897         }
1898
1899         /* If reading the tunnel's consumed bandwidth fails, tear it down */
1900         ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
1901         if (ret)
1902                 goto err_deactivate;
1903
1904         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1905
1906         tb_reclaim_usb3_bandwidth(tb, in, out);
1907         /*
1908          * Transition the links to asymmetric if the consumption exceeds
1909          * the threshold.
1910          */
1911         tb_configure_asym(tb, in, out, consumed_up, consumed_down);
1912
1913         /* Update the domain with the new bandwidth estimation */
1914         tb_recalc_estimated_bandwidth(tb);
1915
1916         /*
1917          * If a DP tunnel exists, change the TMU mode of the host router's
1918          * first-level children to HiFi so that CL0s works.
1919          */
1920         tb_increase_tmu_accuracy(tunnel);
1921         return true;
1922
1923 err_deactivate:
1924         tb_tunnel_deactivate(tunnel);
1925 err_free:
1926         tb_tunnel_free(tunnel);
1927 err_reclaim_usb:
1928         tb_reclaim_usb3_bandwidth(tb, in, out);
1929 err_detach_group:
1930         tb_detach_bandwidth_group(in);
1931 err_dealloc_dp:
1932         tb_switch_dealloc_dp_resource(in->sw, in);
1933 err_rpm_put:
1934         pm_runtime_mark_last_busy(&out->sw->dev);
1935         pm_runtime_put_autosuspend(&out->sw->dev);
1936         pm_runtime_mark_last_busy(&in->sw->dev);
1937         pm_runtime_put_autosuspend(&in->sw->dev);
1938
1939         return false;
1940 }
1941
1942 static void tb_tunnel_dp(struct tb *tb)
1943 {
1944         struct tb_cm *tcm = tb_priv(tb);
1945         struct tb_port *port, *in, *out;
1946
1947         if (!tb_acpi_may_tunnel_dp()) {
1948                 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1949                 return;
1950         }
1951
1952         /*
1953          * Find pair of inactive DP IN and DP OUT adapters and then
1954          * establish a DP tunnel between them.
1955          */
1956         tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
1957
1958         in = NULL;
1959         out = NULL;
1960         list_for_each_entry(port, &tcm->dp_resources, list) {
1961                 if (!tb_port_is_dpin(port))
1962                         continue;
1963
1964                 if (tb_port_is_enabled(port)) {
1965                         tb_port_dbg(port, "DP IN in use\n");
1966                         continue;
1967                 }
1968
1969                 in = port;
1970                 tb_port_dbg(in, "DP IN available\n");
1971
1972                 out = tb_find_dp_out(tb, port);
1973                 if (out)
1974                         tb_tunnel_one_dp(tb, in, out);
1975                 else
1976                         tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
1977         }
1978
1979         if (!in)
1980                 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
1981 }
1982
1983 static void tb_enter_redrive(struct tb_port *port)
1984 {
1985         struct tb_switch *sw = port->sw;
1986
1987         if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
1988                 return;
1989
1990         /*
1991          * If we get hot-unplug for the DP IN port of the host router
1992          * and the DP resource is not available anymore it means there
1993          * is a monitor connected directly to the Type-C port and we are
1994          * in "redrive" mode. For this to work we cannot enter RTD3 so
1995          * we bump up the runtime PM reference count here.
1996          */
1997         if (!tb_port_is_dpin(port))
1998                 return;
1999         if (tb_route(sw))
2000                 return;
2001         if (!tb_switch_query_dp_resource(sw, port)) {
2002                 port->redrive = true;
2003                 pm_runtime_get(&sw->dev);
2004                 tb_port_dbg(port, "enter redrive mode, keeping powered\n");
2005         }
2006 }
2007
2008 static void tb_exit_redrive(struct tb_port *port)
2009 {
2010         struct tb_switch *sw = port->sw;
2011
2012         if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
2013                 return;
2014
2015         if (!tb_port_is_dpin(port))
2016                 return;
2017         if (tb_route(sw))
2018                 return;
2019         if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
2020                 port->redrive = false;
2021                 pm_runtime_put(&sw->dev);
2022                 tb_port_dbg(port, "exit redrive mode\n");
2023         }
2024 }
2025
2026 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
2027 {
2028         struct tb_port *in, *out;
2029         struct tb_tunnel *tunnel;
2030
2031         if (tb_port_is_dpin(port)) {
2032                 tb_port_dbg(port, "DP IN resource unavailable\n");
2033                 in = port;
2034                 out = NULL;
2035         } else {
2036                 tb_port_dbg(port, "DP OUT resource unavailable\n");
2037                 in = NULL;
2038                 out = port;
2039         }
2040
2041         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
2042         if (tunnel)
2043                 tb_deactivate_and_free_tunnel(tunnel);
2044         else
2045                 tb_enter_redrive(port);
2046         list_del_init(&port->list);
2047
2048         /*
2049          * See if there is another DP OUT port that can be used to
2050          * create another tunnel.
2051          */
2052         tb_recalc_estimated_bandwidth(tb);
2053         tb_tunnel_dp(tb);
2054 }
2055
2056 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
2057 {
2058         struct tb_cm *tcm = tb_priv(tb);
2059         struct tb_port *p;
2060
2061         if (tb_port_is_enabled(port))
2062                 return;
2063
2064         list_for_each_entry(p, &tcm->dp_resources, list) {
2065                 if (p == port)
2066                         return;
2067         }
2068
2069         tb_port_dbg(port, "DP %s resource available after hotplug\n",
2070                     tb_port_is_dpin(port) ? "IN" : "OUT");
2071         list_add_tail(&port->list, &tcm->dp_resources);
2072         tb_exit_redrive(port);
2073
2074         /* Look for suitable DP IN <-> DP OUT pairs now */
2075         tb_tunnel_dp(tb);
2076 }
2077
2078 static void tb_disconnect_and_release_dp(struct tb *tb)
2079 {
2080         struct tb_cm *tcm = tb_priv(tb);
2081         struct tb_tunnel *tunnel, *n;
2082
2083         /*
2084          * Tear down all DP tunnels and release their resources. They
2085          * will be re-established after resume based on plug events.
2086          */
2087         list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
2088                 if (tb_tunnel_is_dp(tunnel))
2089                         tb_deactivate_and_free_tunnel(tunnel);
2090         }
2091
2092         while (!list_empty(&tcm->dp_resources)) {
2093                 struct tb_port *port;
2094
2095                 port = list_first_entry(&tcm->dp_resources,
2096                                         struct tb_port, list);
2097                 list_del_init(&port->list);
2098         }
2099 }
2100
2101 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
2102 {
2103         struct tb_tunnel *tunnel;
2104         struct tb_port *up;
2105
2106         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
2107         if (WARN_ON(!up))
2108                 return -ENODEV;
2109
2110         tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
2111         if (WARN_ON(!tunnel))
2112                 return -ENODEV;
2113
2114         tb_switch_xhci_disconnect(sw);
2115
2116         tb_tunnel_deactivate(tunnel);
2117         list_del(&tunnel->list);
2118         tb_tunnel_free(tunnel);
2119         return 0;
2120 }
2121
2122 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
2123 {
2124         struct tb_port *up, *down, *port;
2125         struct tb_cm *tcm = tb_priv(tb);
2126         struct tb_tunnel *tunnel;
2127
2128         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
2129         if (!up)
2130                 return 0;
2131
2132         /*
2133          * Look up available down port. Since we are chaining it should
2134          * be found right above this switch.
2135          */
2136         port = tb_switch_downstream_port(sw);
2137         down = tb_find_pcie_down(tb_switch_parent(sw), port);
2138         if (!down)
2139                 return 0;
2140
2141         tunnel = tb_tunnel_alloc_pci(tb, up, down);
2142         if (!tunnel)
2143                 return -ENOMEM;
2144
2145         if (tb_tunnel_activate(tunnel)) {
2146                 tb_port_info(up,
2147                              "PCIe tunnel activation failed, aborting\n");
2148                 tb_tunnel_free(tunnel);
2149                 return -EIO;
2150         }
2151
2152         /*
2153          * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
2154          * here.
2155          */
2156         if (tb_switch_pcie_l1_enable(sw))
2157                 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
2158
2159         if (tb_switch_xhci_connect(sw))
2160                 tb_sw_warn(sw, "failed to connect xHCI\n");
2161
2162         list_add_tail(&tunnel->list, &tcm->tunnel_list);
2163         return 0;
2164 }
2165
2166 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2167                                     int transmit_path, int transmit_ring,
2168                                     int receive_path, int receive_ring)
2169 {
2170         struct tb_cm *tcm = tb_priv(tb);
2171         struct tb_port *nhi_port, *dst_port;
2172         struct tb_tunnel *tunnel;
2173         struct tb_switch *sw;
2174         int ret;
2175
2176         sw = tb_to_switch(xd->dev.parent);
2177         dst_port = tb_port_at(xd->route, sw);
2178         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2179
2180         mutex_lock(&tb->lock);
2181
2182         /*
2183          * When tunneling DMA paths the link should not enter CL states
2184          * so disable them now.
2185          */
2186         tb_disable_clx(sw);
2187
2188         tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
2189                                      transmit_ring, receive_path, receive_ring);
2190         if (!tunnel) {
2191                 ret = -ENOMEM;
2192                 goto err_clx;
2193         }
2194
2195         if (tb_tunnel_activate(tunnel)) {
2196                 tb_port_info(nhi_port,
2197                              "DMA tunnel activation failed, aborting\n");
2198                 ret = -EIO;
2199                 goto err_free;
2200         }
2201
2202         list_add_tail(&tunnel->list, &tcm->tunnel_list);
2203         mutex_unlock(&tb->lock);
2204         return 0;
2205
2206 err_free:
2207         tb_tunnel_free(tunnel);
2208 err_clx:
2209         tb_enable_clx(sw);
2210         mutex_unlock(&tb->lock);
2211
2212         return ret;
2213 }
2214
2215 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2216                                           int transmit_path, int transmit_ring,
2217                                           int receive_path, int receive_ring)
2218 {
2219         struct tb_cm *tcm = tb_priv(tb);
2220         struct tb_port *nhi_port, *dst_port;
2221         struct tb_tunnel *tunnel, *n;
2222         struct tb_switch *sw;
2223
2224         sw = tb_to_switch(xd->dev.parent);
2225         dst_port = tb_port_at(xd->route, sw);
2226         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2227
2228         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2229                 if (!tb_tunnel_is_dma(tunnel))
2230                         continue;
2231                 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
2232                         continue;
2233
2234                 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
2235                                         receive_path, receive_ring))
2236                         tb_deactivate_and_free_tunnel(tunnel);
2237         }
2238
2239         /*
2240          * Try to re-enable CL states now, it is OK if this fails
2241          * because we may still have another DMA tunnel active through
2242          * the same host router USB4 downstream port.
2243          */
2244         tb_enable_clx(sw);
2245 }
2246
2247 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2248                                        int transmit_path, int transmit_ring,
2249                                        int receive_path, int receive_ring)
2250 {
2251         if (!xd->is_unplugged) {
2252                 mutex_lock(&tb->lock);
2253                 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
2254                                               transmit_ring, receive_path,
2255                                               receive_ring);
2256                 mutex_unlock(&tb->lock);
2257         }
2258         return 0;
2259 }
2260
2261 /* hotplug handling */
2262
2263 /*
2264  * tb_handle_hotplug() - handle hotplug event
2265  *
2266  * Executes on tb->wq.
2267  */
2268 static void tb_handle_hotplug(struct work_struct *work)
2269 {
2270         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
2271         struct tb *tb = ev->tb;
2272         struct tb_cm *tcm = tb_priv(tb);
2273         struct tb_switch *sw;
2274         struct tb_port *port;
2275
2276         /* Bring the domain back from sleep if it was suspended */
2277         pm_runtime_get_sync(&tb->dev);
2278
2279         mutex_lock(&tb->lock);
2280         if (!tcm->hotplug_active)
2281                 goto out; /* during init, suspend or shutdown */
2282
2283         sw = tb_switch_find_by_route(tb, ev->route);
2284         if (!sw) {
2285                 tb_warn(tb,
2286                         "hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
2287                         ev->route, ev->port, ev->unplug);
2288                 goto out;
2289         }
2290         if (ev->port > sw->config.max_port_number) {
2291                 tb_warn(tb,
2292                         "hotplug event from non-existent port %llx:%x (unplug: %d)\n",
2293                         ev->route, ev->port, ev->unplug);
2294                 goto put_sw;
2295         }
2296         port = &sw->ports[ev->port];
2297         if (tb_is_upstream_port(port)) {
2298                 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
2299                        ev->route, ev->port, ev->unplug);
2300                 goto put_sw;
2301         }
2302
2303         pm_runtime_get_sync(&sw->dev);
2304
2305         if (ev->unplug) {
2306                 tb_retimer_remove_all(port);
2307
2308                 if (tb_port_has_remote(port)) {
2309                         tb_port_dbg(port, "switch unplugged\n");
2310                         tb_sw_set_unplugged(port->remote->sw);
2311                         tb_free_invalid_tunnels(tb);
2312                         tb_remove_dp_resources(port->remote->sw);
2313                         tb_switch_tmu_disable(port->remote->sw);
2314                         tb_switch_unconfigure_link(port->remote->sw);
2315                         tb_switch_set_link_width(port->remote->sw,
2316                                                  TB_LINK_WIDTH_SINGLE);
2317                         tb_switch_remove(port->remote->sw);
2318                         port->remote = NULL;
2319                         if (port->dual_link_port)
2320                                 port->dual_link_port->remote = NULL;
2321                         /* Maybe we can create another DP tunnel */
2322                         tb_recalc_estimated_bandwidth(tb);
2323                         tb_tunnel_dp(tb);
2324                 } else if (port->xdomain) {
2325                         struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
2326
2327                         tb_port_dbg(port, "xdomain unplugged\n");
2328                         /*
2329                          * Service drivers are unbound during
2330                          * tb_xdomain_remove() so setting XDomain as
2331                          * unplugged here prevents deadlock if they call
2332                          * tb_xdomain_disable_paths(). We will tear down
2333                          * all the tunnels below.
2334                          */
2335                         xd->is_unplugged = true;
2336                         tb_xdomain_remove(xd);
2337                         port->xdomain = NULL;
2338                         __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
2339                         tb_xdomain_put(xd);
2340                         tb_port_unconfigure_xdomain(port);
2341                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2342                         tb_dp_resource_unavailable(tb, port);
2343                 } else if (!port->port) {
2344                         tb_sw_dbg(sw, "xHCI disconnect request\n");
2345                         tb_switch_xhci_disconnect(sw);
2346                 } else {
2347                         tb_port_dbg(port,
2348                                    "got unplug event for disconnected port, ignoring\n");
2349                 }
2350         } else if (port->remote) {
2351                 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
2352         } else if (!port->port && sw->authorized) {
2353                 tb_sw_dbg(sw, "xHCI connect request\n");
2354                 tb_switch_xhci_connect(sw);
2355         } else {
2356                 if (tb_port_is_null(port)) {
2357                         tb_port_dbg(port, "hotplug: scanning\n");
2358                         tb_scan_port(port);
2359                         if (!port->remote)
2360                                 tb_port_dbg(port, "hotplug: no switch found\n");
2361                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2362                         tb_dp_resource_available(tb, port);
2363                 }
2364         }
2365
2366         pm_runtime_mark_last_busy(&sw->dev);
2367         pm_runtime_put_autosuspend(&sw->dev);
2368
2369 put_sw:
2370         tb_switch_put(sw);
2371 out:
2372         mutex_unlock(&tb->lock);
2373
2374         pm_runtime_mark_last_busy(&tb->dev);
2375         pm_runtime_put_autosuspend(&tb->dev);
2376
2377         kfree(ev);
2378 }
2379
2380 static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
2381                                  int *requested_down)
2382 {
2383         int allocated_up, allocated_down, available_up, available_down, ret;
2384         int requested_up_corrected, requested_down_corrected, granularity;
2385         int max_up, max_down, max_up_rounded, max_down_rounded;
2386         struct tb_bandwidth_group *group;
2387         struct tb *tb = tunnel->tb;
2388         struct tb_port *in, *out;
2389         bool downstream;
2390
2391         ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
2392         if (ret)
2393                 return ret;
2394
2395         in = tunnel->src_port;
2396         out = tunnel->dst_port;
2397
2398         tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n",
2399                       allocated_up, allocated_down);
2400
2401         /*
2402          * If we get a rounded-up request from the graphics side, say HBR2 x 4
2403          * that is 17500 instead of 17280 (this is because of the
2404          * granularity), we allow it too. Here the graphics has already
2405          * negotiated with the DPRX the maximum possible rates (which is
2406          * 17280 in this case).
2407          *
2408          * Since the link cannot go higher than 17280 we use that in our
2409          * calculations but the DP IN adapter Allocated BW write must be
2410          * the same value (17500) otherwise the adapter will mark it as
2411          * failed for graphics.
2412          */
2413         ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
2414         if (ret)
2415                 goto fail;
2416
2417         ret = usb4_dp_port_granularity(in);
2418         if (ret < 0)
2419                 goto fail;
2420         granularity = ret;
2421
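        /*
         * Example with illustrative numbers: for HBR2 x 4 the DPRX
         * maximum is 17280 Mb/s and with, say, a 250 Mb/s granularity
         * roundup() gives 17500 Mb/s, matching the rounded up request
         * coming from the graphics side.
         */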
2422         max_up_rounded = roundup(max_up, granularity);
2423         max_down_rounded = roundup(max_down, granularity);
2424
2425         /*
2426          * This will "fix" the request down to the maximum supported
2427          * rate * lanes if it is at the maximum rounded up level.
2428          */
2429         requested_up_corrected = *requested_up;
2430         if (requested_up_corrected == max_up_rounded)
2431                 requested_up_corrected = max_up;
2432         else if (requested_up_corrected < 0)
2433                 requested_up_corrected = 0;
2434         requested_down_corrected = *requested_down;
2435         if (requested_down_corrected == max_down_rounded)
2436                 requested_down_corrected = max_down;
2437         else if (requested_down_corrected < 0)
2438                 requested_down_corrected = 0;
2439
2440         tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n",
2441                       requested_up_corrected, requested_down_corrected);
2442
2443         if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
2444             (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
2445                 tb_tunnel_dbg(tunnel,
2446                               "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
2447                               requested_up_corrected, requested_down_corrected,
2448                               max_up_rounded, max_down_rounded);
2449                 ret = -ENOBUFS;
2450                 goto fail;
2451         }
2452
2453         downstream = tb_tunnel_direction_downstream(tunnel);
2454         group = in->group;
2455
2456         if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
2457             (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
2458                 if (tunnel->bw_mode) {
2459                         int reserved;
2460                         /*
2461                          * If the requested bandwidth is less than or equal
2462                          * to what is currently allocated to that tunnel we
2463                          * simply change the reservation of the tunnel and
2464                          * keep the released bandwidth reserved for the
2465                          * group for the next 10s. Then we release it for
2466                          * others to use.
2467                          */
2468                         if (downstream)
2469                                 reserved = allocated_down - *requested_down;
2470                         else
2471                                 reserved = allocated_up - *requested_up;
2472
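                        /*
                         * Illustration (numbers are only an example):
                         * if 8000 Mb/s was allocated and the new
                         * request is 6000 Mb/s, the 2000 Mb/s
                         * difference stays reserved for this group for
                         * TB_RELEASE_BW_TIMEOUT before it is given
                         * back to others.
                         */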
2473                         if (reserved > 0) {
2474                                 group->reserved += reserved;
2475                                 tb_dbg(tb, "group %d reserved %d total %d Mb/s\n",
2476                                        group->index, reserved, group->reserved);
2477
2478                                 /*
2479                                  * If it was not already pending,
2480                                  * schedule release now. If it is then
2481                                  * postpone it for the next 10s (unless
2482                                  * it is already running in which case
2483                                  * the 10s already expired and we should
2484                                  * give the reserved back to others).
2485                                  */
2486                                 mod_delayed_work(system_wq, &group->release_work,
2487                                         msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));
2488                         }
2489                 }
2490
2491                 return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2492                                                  requested_down);
2493         }
2494
2495         /*
2496          * More bandwidth is requested. Release all the potential
2497          * bandwidth from USB3 first.
2498          */
2499         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
2500         if (ret)
2501                 goto fail;
2502
2503         /*
2504          * Then go over all tunnels that cross the same USB4 ports (they
2505          * are also in the same group but we use the same function here
2506          * that we use with the normal bandwidth allocation).
2507          */
2508         ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
2509                                      true);
2510         if (ret)
2511                 goto reclaim;
2512
2513         tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d (+ %u reserved) Mb/s\n",
2514                       available_up, available_down, group->reserved);
2515
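        /*
         * Bandwidth that is still reserved for the group can be used
         * to satisfy the request as well.
         */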
2516         if ((*requested_up >= 0 &&
2517                 available_up + group->reserved >= requested_up_corrected) ||
2518             (*requested_down >= 0 &&
2519                 available_down + group->reserved >= requested_down_corrected)) {
2520                 int released = 0;
2521
2522                 /*
2523                  * If bandwidth on a link is >= asym_threshold
2524                  * transition the link to asymmetric.
2525                  */
2526                 ret = tb_configure_asym(tb, in, out, *requested_up,
2527                                         *requested_down);
2528                 if (ret) {
2529                         tb_configure_sym(tb, in, out, true);
2530                         goto fail;
2531                 }
2532
2533                 ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2534                                                 requested_down);
2535                 if (ret) {
2536                         tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
2537                         tb_configure_sym(tb, in, out, true);
2538                 }
2539
2540                 if (downstream) {
2541                         if (*requested_down > available_down)
2542                                 released = *requested_down - available_down;
2543                 } else {
2544                         if (*requested_up > available_up)
2545                                 released = *requested_up - available_up;
2546                 }
2547                 if (released) {
2548                         group->reserved -= released;
2549                         tb_dbg(tb, "group %d released %d total %d Mb/s\n",
2550                                group->index, released, group->reserved);
2551                 }
2552         } else {
2553                 ret = -ENOBUFS;
2554         }
2555
2556 reclaim:
2557         tb_reclaim_usb3_bandwidth(tb, in, out);
2558 fail:
2559         if (ret && ret != -ENODEV) {
2560                 /*
2561                  * Write back the same allocated values (so no change); this
2562                  * makes the DPTX request fail on the graphics side.
2563                  */
2564                 tb_tunnel_dbg(tunnel,
2565                               "failing the request by rewriting allocated %d/%d Mb/s\n",
2566                               allocated_up, allocated_down);
2567                 tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
2568         }
2569
2570         return ret;
2571 }
2572
2573 static void tb_handle_dp_bandwidth_request(struct work_struct *work)
2574 {
2575         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
2576         int requested_bw, requested_up, requested_down, ret;
2577         struct tb_tunnel *tunnel;
2578         struct tb *tb = ev->tb;
2579         struct tb_cm *tcm = tb_priv(tb);
2580         struct tb_switch *sw;
2581         struct tb_port *in;
2582
2583         pm_runtime_get_sync(&tb->dev);
2584
2585         mutex_lock(&tb->lock);
2586         if (!tcm->hotplug_active)
2587                 goto unlock;
2588
2589         sw = tb_switch_find_by_route(tb, ev->route);
2590         if (!sw) {
2591                 tb_warn(tb, "bandwidth request from non-existent router %llx\n",
2592                         ev->route);
2593                 goto unlock;
2594         }
2595
2596         in = &sw->ports[ev->port];
2597         if (!tb_port_is_dpin(in)) {
2598                 tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
2599                 goto put_sw;
2600         }
2601
2602         tb_port_dbg(in, "handling bandwidth allocation request\n");
2603
2604         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
2605         if (!tunnel) {
2606                 tb_port_warn(in, "failed to find tunnel\n");
2607                 goto put_sw;
2608         }
2609
2610         if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
2611                 if (tunnel->bw_mode) {
2612                         /*
2613                          * Reset the tunnel back to use the legacy
2614                          * allocation.
2615                          */
2616                         tunnel->bw_mode = false;
2617                         tb_port_dbg(in, "DPTX disabled bandwidth allocation mode\n");
2618                 } else {
2619                         tb_port_warn(in, "bandwidth allocation mode not enabled\n");
2620                 }
2621                 goto put_sw;
2622         }
2623
2624         ret = usb4_dp_port_requested_bandwidth(in);
2625         if (ret < 0) {
2626                 if (ret == -ENODATA) {
2627                         /*
2628                          * There is no request active so this means the
2629                          * BW allocation mode was enabled from graphics
2630                          * side. At this point we know that the graphics
2631                          * driver has read the DPRX capabilities so we
2632                          * can offer a better bandwidth estimation.
2633                          */
2634                         tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n");
2635                         tb_recalc_estimated_bandwidth(tb);
2636                 } else {
2637                         tb_port_warn(in, "failed to read requested bandwidth\n");
2638                 }
2639                 goto put_sw;
2640         }
2641         requested_bw = ret;
2642
2643         tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
2644
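        /* A negative value means that direction is not part of the request */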
2645         if (tb_tunnel_direction_downstream(tunnel)) {
2646                 requested_up = -1;
2647                 requested_down = requested_bw;
2648         } else {
2649                 requested_up = requested_bw;
2650                 requested_down = -1;
2651         }
2652
2653         ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
2654         if (ret) {
2655                 if (ret == -ENOBUFS)
2656                         tb_tunnel_warn(tunnel,
2657                                        "not enough bandwidth available\n");
2658                 else
2659                         tb_tunnel_warn(tunnel,
2660                                        "failed to change bandwidth allocation\n");
2661         } else {
2662                 tb_tunnel_dbg(tunnel,
2663                               "bandwidth allocation changed to %d/%d Mb/s\n",
2664                               requested_up, requested_down);
2665
2666                 /* Update other clients about the allocation change */
2667                 tb_recalc_estimated_bandwidth(tb);
2668         }
2669
2670 put_sw:
2671         tb_switch_put(sw);
2672 unlock:
2673         mutex_unlock(&tb->lock);
2674
2675         pm_runtime_mark_last_busy(&tb->dev);
2676         pm_runtime_put_autosuspend(&tb->dev);
2677
2678         kfree(ev);
2679 }
2680
2681 static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
2682 {
2683         struct tb_hotplug_event *ev;
2684
2685         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
2686         if (!ev)
2687                 return;
2688
2689         ev->tb = tb;
2690         ev->route = route;
2691         ev->port = port;
2692         INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
2693         queue_work(tb->wq, &ev->work);
2694 }
2695
2696 static void tb_handle_notification(struct tb *tb, u64 route,
2697                                    const struct cfg_error_pkg *error)
2698 {
2699
2700         switch (error->error) {
2701         case TB_CFG_ERROR_PCIE_WAKE:
2702         case TB_CFG_ERROR_DP_CON_CHANGE:
2703         case TB_CFG_ERROR_DPTX_DISCOVERY:
2704                 if (tb_cfg_ack_notification(tb->ctl, route, error))
2705                         tb_warn(tb, "could not ack notification on %llx\n",
2706                                 route);
2707                 break;
2708
2709         case TB_CFG_ERROR_DP_BW:
2710                 if (tb_cfg_ack_notification(tb->ctl, route, error))
2711                         tb_warn(tb, "could not ack notification on %llx\n",
2712                                 route);
2713                 tb_queue_dp_bandwidth_request(tb, route, error->port);
2714                 break;
2715
2716         default:
2717                 /* Ignore for now */
2718                 break;
2719         }
2720 }
2721
2722 /*
2723  * tb_schedule_hotplug_handler() - callback function for the control channel
2724  *
2725  * Delegates to tb_handle_hotplug.
2726  */
2727 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
2728                             const void *buf, size_t size)
2729 {
2730         const struct cfg_event_pkg *pkg = buf;
2731         u64 route = tb_cfg_get_route(&pkg->header);
2732
2733         switch (type) {
2734         case TB_CFG_PKG_ERROR:
2735                 tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
2736                 return;
2737         case TB_CFG_PKG_EVENT:
2738                 break;
2739         default:
2740                 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
2741                 return;
2742         }
2743
2744         if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
2745                 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
2746                         pkg->port);
2747         }
2748
2749         tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
2750 }
2751
2752 static void tb_stop(struct tb *tb)
2753 {
2754         struct tb_cm *tcm = tb_priv(tb);
2755         struct tb_tunnel *tunnel;
2756         struct tb_tunnel *n;
2757
2758         cancel_delayed_work(&tcm->remove_work);
2759         /* tunnels are only present after everything has been initialized */
2760         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2761                 /*
2762                  * DMA tunnels require the driver to be functional so we
2763                  * tear them down. Other protocol tunnels can be left
2764                  * intact.
2765                  */
2766                 if (tb_tunnel_is_dma(tunnel))
2767                         tb_tunnel_deactivate(tunnel);
2768                 tb_tunnel_free(tunnel);
2769         }
2770         tb_switch_remove(tb->root_switch);
2771         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2772 }
2773
2774 static void tb_deinit(struct tb *tb)
2775 {
2776         struct tb_cm *tcm = tb_priv(tb);
2777         int i;
2778
2779         /* Cancel all the release bandwidth workers */
2780         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++)
2781                 cancel_delayed_work_sync(&tcm->groups[i].release_work);
2782 }
2783
2784 static int tb_scan_finalize_switch(struct device *dev, void *data)
2785 {
2786         if (tb_is_switch(dev)) {
2787                 struct tb_switch *sw = tb_to_switch(dev);
2788
2789                 /*
2790                  * If we found that the switch was already set up by the
2791                  * boot firmware, mark it as authorized now before we
2792                  * send the uevent to userspace.
2793                  */
2794                 if (sw->boot)
2795                         sw->authorized = 1;
2796
2797                 dev_set_uevent_suppress(dev, false);
2798                 kobject_uevent(&dev->kobj, KOBJ_ADD);
2799                 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
2800         }
2801
2802         return 0;
2803 }
2804
2805 static int tb_start(struct tb *tb, bool reset)
2806 {
2807         struct tb_cm *tcm = tb_priv(tb);
2808         bool discover = true;
2809         int ret;
2810
2811         tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
2812         if (IS_ERR(tb->root_switch))
2813                 return PTR_ERR(tb->root_switch);
2814
2815         /*
2816          * ICM firmware upgrade needs running ICM firmware, which is not
2817          * available in native mode, so disable firmware upgrade of the
2818          * root switch.
2819          *
2820          * However, USB4 routers support NVM firmware upgrade if they
2821          * implement the necessary router operations.
2822          */
2823         tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
2824         /* All USB4 routers support runtime PM */
2825         tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
2826
2827         ret = tb_switch_configure(tb->root_switch);
2828         if (ret) {
2829                 tb_switch_put(tb->root_switch);
2830                 return ret;
2831         }
2832
2833         /* Announce the switch to the world */
2834         ret = tb_switch_add(tb->root_switch);
2835         if (ret) {
2836                 tb_switch_put(tb->root_switch);
2837                 return ret;
2838         }
2839
2840         /*
2841          * To support highest CLx state, we set host router's TMU to
2842          * Normal mode.
2843          */
2844         tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
2845         /* Enable TMU if it is off */
2846         tb_switch_tmu_enable(tb->root_switch);
2847
2848         /*
2849          * Boot firmware might have created tunnels of its own. Since we
2850          * cannot be sure they are usable for us, tear them down and
2851          * reset the ports to handle it as new hotplug for USB4 v1
2852          * routers (for USB4 v2 and beyond we already do host reset).
2853          */
2854         if (reset && tb_switch_is_usb4(tb->root_switch)) {
2855                 discover = false;
2856                 if (usb4_switch_version(tb->root_switch) == 1)
2857                         tb_switch_reset(tb->root_switch);
2858         }
2859
2860         if (discover) {
2861                 /* Full scan to discover devices added before the driver was loaded. */
2862                 tb_scan_switch(tb->root_switch);
2863                 /* Find out tunnels created by the boot firmware */
2864                 tb_discover_tunnels(tb);
2865                 /* Add DP resources from the DP tunnels created by the boot firmware */
2866                 tb_discover_dp_resources(tb);
2867         }
2868
2869         /*
2870          * If the boot firmware did not create USB 3.x tunnels, create them
2871          * now for the whole topology.
2872          */
2873         tb_create_usb3_tunnels(tb->root_switch);
2874         /* Add DP IN resources for the root switch */
2875         tb_add_dp_resources(tb->root_switch);
2876         /* Make the discovered switches available to the userspace */
2877         device_for_each_child(&tb->root_switch->dev, NULL,
2878                               tb_scan_finalize_switch);
2879
2880         /* Allow tb_handle_hotplug to progress events */
2881         tcm->hotplug_active = true;
2882         return 0;
2883 }
2884
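/*
 * Tear down DP tunnels, put the whole topology to sleep and stop
 * handling hotplug events until resume.
 */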
2885 static int tb_suspend_noirq(struct tb *tb)
2886 {
2887         struct tb_cm *tcm = tb_priv(tb);
2888
2889         tb_dbg(tb, "suspending...\n");
2890         tb_disconnect_and_release_dp(tb);
2891         tb_switch_suspend(tb->root_switch, false);
2892         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2893         tb_dbg(tb, "suspend finished\n");
2894
2895         return 0;
2896 }
2897
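/*
 * Restore CL states, TMU configuration and link settings for the whole
 * topology below @sw after resume.
 */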
2898 static void tb_restore_children(struct tb_switch *sw)
2899 {
2900         struct tb_port *port;
2901
2902         /* No need to restore if the router is already unplugged */
2903         if (sw->is_unplugged)
2904                 return;
2905
2906         if (tb_enable_clx(sw))
2907                 tb_sw_warn(sw, "failed to re-enable CL states\n");
2908
2909         if (tb_enable_tmu(sw))
2910                 tb_sw_warn(sw, "failed to restore TMU configuration\n");
2911
2912         tb_switch_configuration_valid(sw);
2913
2914         tb_switch_for_each_port(sw, port) {
2915                 if (!tb_port_has_remote(port) && !port->xdomain)
2916                         continue;
2917
2918                 if (port->remote) {
2919                         tb_switch_set_link_width(port->remote->sw,
2920                                                  port->remote->sw->link_width);
2921                         tb_switch_configure_link(port->remote->sw);
2922
2923                         tb_restore_children(port->remote->sw);
2924                 } else if (port->xdomain) {
2925                         tb_port_configure_xdomain(port, port->xdomain);
2926                 }
2927         }
2928 }
2929
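/*
 * Resume the domain: bring the routers back up, drop tunnels we cannot
 * trust and restart our own, then re-enable hotplug event handling.
 */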
2930 static int tb_resume_noirq(struct tb *tb)
2931 {
2932         struct tb_cm *tcm = tb_priv(tb);
2933         struct tb_tunnel *tunnel, *n;
2934         unsigned int usb3_delay = 0;
2935         LIST_HEAD(tunnels);
2936
2937         tb_dbg(tb, "resuming...\n");
2938
2939         /*
2940          * For non-USB4 hosts (Apple systems) remove any PCIe devices
2941          * the firmware might have set up.
2942          */
2943         if (!tb_switch_is_usb4(tb->root_switch))
2944                 tb_switch_reset(tb->root_switch);
2945
2946         tb_switch_resume(tb->root_switch, false);
2947         tb_free_invalid_tunnels(tb);
2948         tb_free_unplugged_children(tb->root_switch);
2949         tb_restore_children(tb->root_switch);
2950
2951         /*
2952          * If we get here from suspend to disk, the boot firmware or the
2953          * restore kernel might have created tunnels of its own. Since
2954          * we cannot be sure they are usable for us, find and tear
2955          * them down.
2956          */
2957         tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
2958         list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
2959                 if (tb_tunnel_is_usb3(tunnel))
2960                         usb3_delay = 500;
2961                 tb_tunnel_deactivate(tunnel);
2962                 tb_tunnel_free(tunnel);
2963         }
2964
2965         /* Re-create our tunnels now */
2966         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2967                 /* USB3 requires a delay before it can be re-activated */
2968                 if (tb_tunnel_is_usb3(tunnel)) {
2969                         msleep(usb3_delay);
2970                         /* Only need to do it once */
2971                         usb3_delay = 0;
2972                 }
2973                 tb_tunnel_restart(tunnel);
2974         }
2975         if (!list_empty(&tcm->tunnel_list)) {
2976                 /*
2977                  * The PCIe links need some time to come up again;
2978                  * 100 ms has been enough in practice.
2979                  */
2980                 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
2981                 msleep(100);
2982         }
2983         /* Allow tb_handle_hotplug to progress events */
2984         tcm->hotplug_active = true;
2985         tb_dbg(tb, "resume finished\n");
2986
2987         return 0;
2988 }
2989
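/*
 * Walk the topology and remove any XDomain connections that have been
 * unplugged. Returns the number of XDomains removed.
 */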
2990 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
2991 {
2992         struct tb_port *port;
2993         int ret = 0;
2994
2995         tb_switch_for_each_port(sw, port) {
2996                 if (tb_is_upstream_port(port))
2997                         continue;
2998                 if (port->xdomain && port->xdomain->is_unplugged) {
2999                         tb_retimer_remove_all(port);
3000                         tb_xdomain_remove(port->xdomain);
3001                         tb_port_unconfigure_xdomain(port);
3002                         port->xdomain = NULL;
3003                         ret++;
3004                 } else if (port->remote) {
3005                         ret += tb_free_unplugged_xdomains(port->remote->sw);
3006                 }
3007         }
3008
3009         return ret;
3010 }
3011
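/* Hibernation: stop handling hotplug events while the image is created */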
3012 static int tb_freeze_noirq(struct tb *tb)
3013 {
3014         struct tb_cm *tcm = tb_priv(tb);
3015
3016         tcm->hotplug_active = false;
3017         return 0;
3018 }
3019
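/* Hibernation: start handling hotplug events again */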
3020 static int tb_thaw_noirq(struct tb *tb)
3021 {
3022         struct tb_cm *tcm = tb_priv(tb);
3023
3024         tcm->hotplug_active = true;
3025         return 0;
3026 }
3027
3028 static void tb_complete(struct tb *tb)
3029 {
3030         /*
3031          * Release any unplugged XDomains. If another domain has been
3032          * swapped in place of an unplugged XDomain, we need to run
3033          * another rescan to pick it up.
3034          */
3035         mutex_lock(&tb->lock);
3036         if (tb_free_unplugged_xdomains(tb->root_switch))
3037                 tb_scan_switch(tb->root_switch);
3038         mutex_unlock(&tb->lock);
3039 }
3040
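/* Runtime suspend the whole topology and stop handling hotplug events */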
3041 static int tb_runtime_suspend(struct tb *tb)
3042 {
3043         struct tb_cm *tcm = tb_priv(tb);
3044
3045         mutex_lock(&tb->lock);
3046         tb_switch_suspend(tb->root_switch, true);
3047         tcm->hotplug_active = false;
3048         mutex_unlock(&tb->lock);
3049
3050         return 0;
3051 }
3052
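/*
 * Delayed work scheduled from tb_runtime_resume() to remove any routers
 * and XDomains that were unplugged while the domain was runtime
 * suspended.
 */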
3053 static void tb_remove_work(struct work_struct *work)
3054 {
3055         struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
3056         struct tb *tb = tcm_to_tb(tcm);
3057
3058         mutex_lock(&tb->lock);
3059         if (tb->root_switch) {
3060                 tb_free_unplugged_children(tb->root_switch);
3061                 tb_free_unplugged_xdomains(tb->root_switch);
3062         }
3063         mutex_unlock(&tb->lock);
3064 }
3065
3066 static int tb_runtime_resume(struct tb *tb)
3067 {
3068         struct tb_cm *tcm = tb_priv(tb);
3069         struct tb_tunnel *tunnel, *n;
3070
3071         mutex_lock(&tb->lock);
3072         tb_switch_resume(tb->root_switch, true);
3073         tb_free_invalid_tunnels(tb);
3074         tb_restore_children(tb->root_switch);
3075         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
3076                 tb_tunnel_restart(tunnel);
3077         tcm->hotplug_active = true;
3078         mutex_unlock(&tb->lock);
3079
3080         /*
3081          * Schedule cleanup of any unplugged devices. Run this in a
3082          * separate thread to avoid possible deadlock if the device
3083          * removal runtime resumes the unplugged device.
3084          */
3085         queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
3086         return 0;
3087 }
3088
3089 static const struct tb_cm_ops tb_cm_ops = {
3090         .start = tb_start,
3091         .stop = tb_stop,
3092         .deinit = tb_deinit,
3093         .suspend_noirq = tb_suspend_noirq,
3094         .resume_noirq = tb_resume_noirq,
3095         .freeze_noirq = tb_freeze_noirq,
3096         .thaw_noirq = tb_thaw_noirq,
3097         .complete = tb_complete,
3098         .runtime_suspend = tb_runtime_suspend,
3099         .runtime_resume = tb_runtime_resume,
3100         .handle_event = tb_handle_event,
3101         .disapprove_switch = tb_disconnect_pci,
3102         .approve_switch = tb_tunnel_pci,
3103         .approve_xdomain_paths = tb_approve_xdomain_paths,
3104         .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
3105 };
3106
3107 /*
3108  * During suspend the Thunderbolt controller is reset and all PCIe
3109  * tunnels are lost. The NHI driver will try to reestablish all tunnels
3110  * during resume. This adds device links between the tunneled PCIe
3111  * downstream ports and the NHI so that the device core makes sure the
3112  * NHI is resumed before the rest.
3113  */
3114 static bool tb_apple_add_links(struct tb_nhi *nhi)
3115 {
3116         struct pci_dev *upstream, *pdev;
3117         bool ret;
3118
3119         if (!x86_apple_machine)
3120                 return false;
3121
3122         switch (nhi->pdev->device) {
3123         case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
3124         case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
3125         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
3126         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
3127                 break;
3128         default:
3129                 return false;
3130         }
3131
3132         upstream = pci_upstream_bridge(nhi->pdev);
3133         while (upstream) {
3134                 if (!pci_is_pcie(upstream))
3135                         return false;
3136                 if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
3137                         break;
3138                 upstream = pci_upstream_bridge(upstream);
3139         }
3140
3141         if (!upstream)
3142                 return false;
3143
3144         /*
3145          * For each hotplug-capable downstream port, add a device link
3146          * back to the NHI so that PCIe tunnels can be re-established after
3147          * sleep.
3148          */
3149         ret = false;
3150         for_each_pci_bridge(pdev, upstream->subordinate) {
3151                 const struct device_link *link;
3152
3153                 if (!pci_is_pcie(pdev))
3154                         continue;
3155                 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
3156                     !pdev->is_hotplug_bridge)
3157                         continue;
3158
3159                 link = device_link_add(&pdev->dev, &nhi->pdev->dev,
3160                                        DL_FLAG_AUTOREMOVE_SUPPLIER |
3161                                        DL_FLAG_PM_RUNTIME);
3162                 if (link) {
3163                         dev_dbg(&nhi->pdev->dev, "created link from %s\n",
3164                                 dev_name(&pdev->dev));
3165                         ret = true;
3166                 } else {
3167                         dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
3168                                  dev_name(&pdev->dev));
3169                 }
3170         }
3171
3172         return ret;
3173 }
3174
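/*
 * Allocate a Thunderbolt domain for @nhi that is driven by this
 * software connection manager and hook up its callbacks.
 */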
3175 struct tb *tb_probe(struct tb_nhi *nhi)
3176 {
3177         struct tb_cm *tcm;
3178         struct tb *tb;
3179
3180         tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
3181         if (!tb)
3182                 return NULL;
3183
3184         if (tb_acpi_may_tunnel_pcie())
3185                 tb->security_level = TB_SECURITY_USER;
3186         else
3187                 tb->security_level = TB_SECURITY_NOPCIE;
3188
3189         tb->cm_ops = &tb_cm_ops;
3190
3191         tcm = tb_priv(tb);
3192         INIT_LIST_HEAD(&tcm->tunnel_list);
3193         INIT_LIST_HEAD(&tcm->dp_resources);
3194         INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
3195         tb_init_bandwidth_groups(tcm);
3196
3197         tb_dbg(tb, "using software connection manager\n");
3198
3199         /*
3200          * Device links are needed to make sure we establish tunnels
3201          * before the PCIe/USB stack is resumed, so complain here if we
3202          * find them missing.
3203          */
3204         if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
3205                 tb_warn(tb, "device links to tunneled native ports are missing!\n");
3206
3207         return tb;
3208 }