drivers/thunderbolt/tb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - bus logic (NHI independent)
4  *
5  * Copyright (c) 2014 Andreas Noever <[email protected]>
6  * Copyright (C) 2019, Intel Corporation
7  */
8
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/platform_data/x86/apple.h>
14
15 #include "tb.h"
16 #include "tb_regs.h"
17 #include "tunnel.h"
18
19 #define TB_TIMEOUT              100     /* ms */
20 #define TB_RELEASE_BW_TIMEOUT   10000   /* ms */
21
22 /*
23  * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
24  * direction. This is 40G - 10% guard band bandwidth.
25  */
26 #define TB_ASYM_MIN             (40000 * 90 / 100)
27
28 /*
29  * Threshold bandwidth (in Mb/s) that is used to switch the links to
30  * asymmetric and back. This is selected as 45G which means when the
31  * request is higher than this, we switch the link to asymmetric, and
32  * when it is less than this we switch it back. The 45G is selected so
33  * that we still have 27G (of the total 72G) for bulk PCIe traffic when
34  * switching back to symmetric.
35  */
36 #define TB_ASYM_THRESHOLD       45000
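
/*
 * For illustration, the arithmetic behind the two constants above,
 * assuming a Gen 4 link running at 40 Gb/s per lane:
 *
 *	TB_ASYM_MIN      = 40000 * 90 / 100 = 36000 Mb/s
 *	symmetric usable = 2 * 40000 - 10%  = 72000 Mb/s per direction
 *	left for PCIe    = 72000 - 45000    = 27000 Mb/s (the 27G above)
 */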
37
38 #define MAX_GROUPS              7       /* max Group_ID is 7 */
39
40 static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
41 module_param_named(asym_threshold, asym_threshold, uint, 0444);
42 MODULE_PARM_DESC(asym_threshold,
43                 "threshold (Mb/s) when to switch Gen 4 link symmetry. 0 disables. (default: "
44                 __MODULE_STRING(TB_ASYM_THRESHOLD) ")");
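
/*
 * Example usage (assuming the driver is built as the "thunderbolt"
 * module): asymmetric transitions can be disabled from the kernel
 * command line with
 *
 *	thunderbolt.asym_threshold=0
 *
 * The parameter is read-only (0444) once the driver has loaded.
 */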
45
46 /**
47  * struct tb_cm - Simple Thunderbolt connection manager
48  * @tunnel_list: List of active tunnels
49  * @dp_resources: List of available DP resources for DP tunneling
50  * @hotplug_active: tb_handle_hotplug will stop progressing plug
51  *                  events and exit if this is not set (it needs to
52  *                  acquire the lock one more time). Used to drain wq
53  *                  after cfg has been paused.
54  * @remove_work: Work used to remove any unplugged routers after
55  *               runtime resume
56  * @groups: Bandwidth groups used in this domain.
57  */
58 struct tb_cm {
59         struct list_head tunnel_list;
60         struct list_head dp_resources;
61         bool hotplug_active;
62         struct delayed_work remove_work;
63         struct tb_bandwidth_group groups[MAX_GROUPS];
64 };
65
66 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
67 {
68         return ((void *)tcm - sizeof(struct tb));
69 }
70
71 struct tb_hotplug_event {
72         struct work_struct work;
73         struct tb *tb;
74         u64 route;
75         u8 port;
76         bool unplug;
77 };
78
79 static void tb_handle_hotplug(struct work_struct *work);
80
81 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
82 {
83         struct tb_hotplug_event *ev;
84
85         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
86         if (!ev)
87                 return;
88
89         ev->tb = tb;
90         ev->route = route;
91         ev->port = port;
92         ev->unplug = unplug;
93         INIT_WORK(&ev->work, tb_handle_hotplug);
94         queue_work(tb->wq, &ev->work);
95 }
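
/*
 * Illustrative usage (hypothetical values): queueing a plug event for
 * port 3 of the router at route 0x1 looks like
 *
 *	tb_queue_hotplug(tb, 0x1, 3, false);
 *
 * and the matching unplug passes true as the last argument. If the
 * event allocation fails the event is silently dropped.
 */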
96
97 /* enumeration & hot plug handling */
98
99 static void tb_add_dp_resources(struct tb_switch *sw)
100 {
101         struct tb_cm *tcm = tb_priv(sw->tb);
102         struct tb_port *port;
103
104         tb_switch_for_each_port(sw, port) {
105                 if (!tb_port_is_dpin(port))
106                         continue;
107
108                 if (!tb_switch_query_dp_resource(sw, port))
109                         continue;
110
111                 /*
112                  * If a DP IN on a device router exists, position it at the
113                  * beginning of the DP resources list, so that it is used
114                  * before DP IN of the host router. This way external GPU(s)
115                  * will be prioritized when pairing DP IN to a DP OUT.
116                  */
117                 if (tb_route(sw))
118                         list_add(&port->list, &tcm->dp_resources);
119                 else
120                         list_add_tail(&port->list, &tcm->dp_resources);
121
122                 tb_port_dbg(port, "DP IN resource available\n");
123         }
124 }
125
126 static void tb_remove_dp_resources(struct tb_switch *sw)
127 {
128         struct tb_cm *tcm = tb_priv(sw->tb);
129         struct tb_port *port, *tmp;
130
131         /* Clear children resources first */
132         tb_switch_for_each_port(sw, port) {
133                 if (tb_port_has_remote(port))
134                         tb_remove_dp_resources(port->remote->sw);
135         }
136
137         list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
138                 if (port->sw == sw) {
139                         tb_port_dbg(port, "DP OUT resource unavailable\n");
140                         list_del_init(&port->list);
141                 }
142         }
143 }
144
145 static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
146 {
147         struct tb_cm *tcm = tb_priv(tb);
148         struct tb_port *p;
149
150         list_for_each_entry(p, &tcm->dp_resources, list) {
151                 if (p == port)
152                         return;
153         }
154
155         tb_port_dbg(port, "discovered available DP %s resource\n",
156                     tb_port_is_dpin(port) ? "IN" : "OUT");
157         list_add_tail(&port->list, &tcm->dp_resources);
158 }
159
160 static void tb_discover_dp_resources(struct tb *tb)
161 {
162         struct tb_cm *tcm = tb_priv(tb);
163         struct tb_tunnel *tunnel;
164
165         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
166                 if (tb_tunnel_is_dp(tunnel))
167                         tb_discover_dp_resource(tb, tunnel->dst_port);
168         }
169 }
170
171 /* Enables CL states up to host router */
172 static int tb_enable_clx(struct tb_switch *sw)
173 {
174         struct tb_cm *tcm = tb_priv(sw->tb);
175         unsigned int clx = TB_CL0S | TB_CL1;
176         const struct tb_tunnel *tunnel;
177         int ret;
178
179         /*
180          * Currently only enable CLx for the first link. This is enough
181          * to allow the CPU to save energy at least on Intel hardware
182          * and makes it slightly simpler to implement. We may change
183          * this in the future to cover the whole topology if it turns
184          * out to be beneficial.
185          */
186         while (sw && tb_switch_depth(sw) > 1)
187                 sw = tb_switch_parent(sw);
188
189         if (!sw)
190                 return 0;
191
192         if (tb_switch_depth(sw) != 1)
193                 return 0;
194
195         /*
196          * If we are re-enabling then check if there is an active DMA
197          * tunnel and in that case bail out.
198          */
199         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
200                 if (tb_tunnel_is_dma(tunnel)) {
201                         if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
202                                 return 0;
203                 }
204         }
205
206         /*
207          * Initially try with CL2. If that's not supported by the
208          * topology try with CL0s and CL1 and then give up.
209          */
210         ret = tb_switch_clx_enable(sw, clx | TB_CL2);
211         if (ret == -EOPNOTSUPP)
212                 ret = tb_switch_clx_enable(sw, clx);
213         return ret == -EOPNOTSUPP ? 0 : ret;
214 }
215
216 /**
217  * tb_disable_clx() - Disable CL states up to host router
218  * @sw: Router to start
219  *
220  * Disables CL states from @sw up to the host router. Returns true if
221  * any CL states were disabled. This can be used to figure out whether
222  * the link was setup by us or the boot firmware so we don't
223  * accidentally enable them if they were not enabled during discovery.
224  */
225 static bool tb_disable_clx(struct tb_switch *sw)
226 {
227         bool disabled = false;
228
229         do {
230                 int ret;
231
232                 ret = tb_switch_clx_disable(sw);
233                 if (ret > 0)
234                         disabled = true;
235                 else if (ret < 0)
236                         tb_sw_warn(sw, "failed to disable CL states\n");
237
238                 sw = tb_switch_parent(sw);
239         } while (sw);
240
241         return disabled;
242 }
243
244 static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
245 {
246         struct tb_switch *sw;
247
248         sw = tb_to_switch(dev);
249         if (!sw)
250                 return 0;
251
252         if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
253                 enum tb_switch_tmu_mode mode;
254                 int ret;
255
256                 if (tb_switch_clx_is_enabled(sw, TB_CL1))
257                         mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
258                 else
259                         mode = TB_SWITCH_TMU_MODE_HIFI_BI;
260
261                 ret = tb_switch_tmu_configure(sw, mode);
262                 if (ret)
263                         return ret;
264
265                 return tb_switch_tmu_enable(sw);
266         }
267
268         return 0;
269 }
270
271 static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
272 {
273         struct tb_switch *sw;
274
275         if (!tunnel)
276                 return;
277
278         /*
279          * Once first DP tunnel is established we change the TMU
280          * accuracy of first depth child routers (and the host router)
281          * to the highest. This is needed for the DP tunneling to work
282          * but also allows CL0s.
283          *
284          * If both routers are v2 then we don't need to do anything as
285          * they are using enhanced TMU mode that allows all CLx.
286          */
287         sw = tunnel->tb->root_switch;
288         device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
289 }
290
291 static int tb_enable_tmu(struct tb_switch *sw)
292 {
293         int ret;
294
295         /*
296          * If both routers at the end of the link are v2 we simply
297          * enable the enhanced uni-directional mode. That covers all
298          * the CL states. For v1 and before we need to use the normal
299          * rate to allow CL1 (when supported). Otherwise we keep the TMU
300          * running at the highest accuracy.
301          */
302         ret = tb_switch_tmu_configure(sw,
303                         TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
304         if (ret == -EOPNOTSUPP) {
305                 if (tb_switch_clx_is_enabled(sw, TB_CL1))
306                         ret = tb_switch_tmu_configure(sw,
307                                         TB_SWITCH_TMU_MODE_LOWRES);
308                 else
309                         ret = tb_switch_tmu_configure(sw,
310                                         TB_SWITCH_TMU_MODE_HIFI_BI);
311         }
312         if (ret)
313                 return ret;
314
315         /* If it is already enabled in correct mode, don't touch it */
316         if (tb_switch_tmu_is_enabled(sw))
317                 return 0;
318
319         ret = tb_switch_tmu_disable(sw);
320         if (ret)
321                 return ret;
322
323         ret = tb_switch_tmu_post_time(sw);
324         if (ret)
325                 return ret;
326
327         return tb_switch_tmu_enable(sw);
328 }
329
330 static void tb_switch_discover_tunnels(struct tb_switch *sw,
331                                        struct list_head *list,
332                                        bool alloc_hopids)
333 {
334         struct tb *tb = sw->tb;
335         struct tb_port *port;
336
337         tb_switch_for_each_port(sw, port) {
338                 struct tb_tunnel *tunnel = NULL;
339
340                 switch (port->config.type) {
341                 case TB_TYPE_DP_HDMI_IN:
342                         tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
343                         tb_increase_tmu_accuracy(tunnel);
344                         break;
345
346                 case TB_TYPE_PCIE_DOWN:
347                         tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
348                         break;
349
350                 case TB_TYPE_USB3_DOWN:
351                         tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
352                         break;
353
354                 default:
355                         break;
356                 }
357
358                 if (tunnel)
359                         list_add_tail(&tunnel->list, list);
360         }
361
362         tb_switch_for_each_port(sw, port) {
363                 if (tb_port_has_remote(port)) {
364                         tb_switch_discover_tunnels(port->remote->sw, list,
365                                                    alloc_hopids);
366                 }
367         }
368 }
369
370 static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
371 {
372         if (tb_switch_is_usb4(port->sw))
373                 return usb4_port_configure_xdomain(port, xd);
374         return tb_lc_configure_xdomain(port);
375 }
376
377 static void tb_port_unconfigure_xdomain(struct tb_port *port)
378 {
379         if (tb_switch_is_usb4(port->sw))
380                 usb4_port_unconfigure_xdomain(port);
381         else
382                 tb_lc_unconfigure_xdomain(port);
383 }
384
385 static void tb_scan_xdomain(struct tb_port *port)
386 {
387         struct tb_switch *sw = port->sw;
388         struct tb *tb = sw->tb;
389         struct tb_xdomain *xd;
390         u64 route;
391
392         if (!tb_is_xdomain_enabled())
393                 return;
394
395         route = tb_downstream_route(port);
396         xd = tb_xdomain_find_by_route(tb, route);
397         if (xd) {
398                 tb_xdomain_put(xd);
399                 return;
400         }
401
402         xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
403                               NULL);
404         if (xd) {
405                 tb_port_at(route, sw)->xdomain = xd;
406                 tb_port_configure_xdomain(port, xd);
407                 tb_xdomain_add(xd);
408         }
409 }
410
411 /**
412  * tb_find_unused_port() - return the first inactive port on @sw
413  * @sw: Switch to find the port on
414  * @type: Port type to look for
415  */
416 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
417                                            enum tb_port_type type)
418 {
419         struct tb_port *port;
420
421         tb_switch_for_each_port(sw, port) {
422                 if (tb_is_upstream_port(port))
423                         continue;
424                 if (port->config.type != type)
425                         continue;
426                 if (!port->cap_adap)
427                         continue;
428                 if (tb_port_is_enabled(port))
429                         continue;
430                 return port;
431         }
432         return NULL;
433 }
434
435 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
436                                          const struct tb_port *port)
437 {
438         struct tb_port *down;
439
440         down = usb4_switch_map_usb3_down(sw, port);
441         if (down && !tb_usb3_port_is_enabled(down))
442                 return down;
443         return NULL;
444 }
445
446 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
447                                         struct tb_port *src_port,
448                                         struct tb_port *dst_port)
449 {
450         struct tb_cm *tcm = tb_priv(tb);
451         struct tb_tunnel *tunnel;
452
453         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
454                 if (tunnel->type == type &&
455                     ((src_port && src_port == tunnel->src_port) ||
456                      (dst_port && dst_port == tunnel->dst_port))) {
457                         return tunnel;
458                 }
459         }
460
461         return NULL;
462 }
463
464 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
465                                                    struct tb_port *src_port,
466                                                    struct tb_port *dst_port)
467 {
468         struct tb_port *port, *usb3_down;
469         struct tb_switch *sw;
470
471         /* Pick the router that is deepest in the topology */
472         if (tb_port_path_direction_downstream(src_port, dst_port))
473                 sw = dst_port->sw;
474         else
475                 sw = src_port->sw;
476
477         /* Can't be the host router */
478         if (sw == tb->root_switch)
479                 return NULL;
480
481         /* Find the downstream USB4 port that leads to this router */
482         port = tb_port_at(tb_route(sw), tb->root_switch);
483         /* Find the corresponding host router USB3 downstream port */
484         usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
485         if (!usb3_down)
486                 return NULL;
487
488         return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
489 }
490
491 /**
492  * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
493  * @tb: Domain structure
494  * @src_port: Source protocol adapter
495  * @dst_port: Destination protocol adapter
496  * @port: USB4 port the consumed bandwidth is calculated for
497  * @consumed_up: Consumed upstream bandwidth (Mb/s)
498  * @consumed_down: Consumed downstream bandwidth (Mb/s)
499  *
500  * Calculates consumed USB3 and PCIe bandwidth at @port between path
501  * from @src_port to @dst_port. Does not take the tunnel starting from
502  * @src_port and ending at @dst_port into account.
503  */
504 static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
505                                            struct tb_port *src_port,
506                                            struct tb_port *dst_port,
507                                            struct tb_port *port,
508                                            int *consumed_up,
509                                            int *consumed_down)
510 {
511         int pci_consumed_up, pci_consumed_down;
512         struct tb_tunnel *tunnel;
513
514         *consumed_up = *consumed_down = 0;
515
516         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
517         if (tunnel && tunnel->src_port != src_port &&
518             tunnel->dst_port != dst_port) {
519                 int ret;
520
521                 ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
522                                                    consumed_down);
523                 if (ret)
524                         return ret;
525         }
526
527         /*
528          * If there is anything reserved for PCIe bulk traffic take it
529          * into account here too.
530          */
531         if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
532                 *consumed_up += pci_consumed_up;
533                 *consumed_down += pci_consumed_down;
534         }
535
536         return 0;
537 }
538
539 /**
540  * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
541  * @tb: Domain structure
542  * @src_port: Source protocol adapter
543  * @dst_port: Destination protocol adapter
544  * @port: USB4 port the consumed bandwidth is calculated for
545  * @consumed_up: Consumed upstream bandwidth (Mb/s)
546  * @consumed_down: Consumed downstream bandwidth (Mb/s)
547  *
548  * Calculates consumed DP bandwidth at @port between path from @src_port
549  * to @dst_port. Does not take the tunnel starting from @src_port and
550  * ending at @dst_port into account.
551  *
552  * If there is bandwidth reserved for any of the groups between
553  * @src_port and @dst_port (but not yet used) that is also taken into
554  * account in the returned consumed bandwidth.
555  */
556 static int tb_consumed_dp_bandwidth(struct tb *tb,
557                                     struct tb_port *src_port,
558                                     struct tb_port *dst_port,
559                                     struct tb_port *port,
560                                     int *consumed_up,
561                                     int *consumed_down)
562 {
563         int group_reserved[MAX_GROUPS] = {};
564         struct tb_cm *tcm = tb_priv(tb);
565         struct tb_tunnel *tunnel;
566         bool downstream;
567         int i, ret;
568
569         *consumed_up = *consumed_down = 0;
570
571         /*
572          * Find all DP tunnels that cross the port and reduce
573          * their consumed bandwidth from the available.
574          */
575         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
576                 const struct tb_bandwidth_group *group;
577                 int dp_consumed_up, dp_consumed_down;
578
579                 if (tb_tunnel_is_invalid(tunnel))
580                         continue;
581
582                 if (!tb_tunnel_is_dp(tunnel))
583                         continue;
584
585                 if (!tb_tunnel_port_on_path(tunnel, port))
586                         continue;
587
588                 /*
589                  * Calculate what is reserved for groups crossing the
590                  * same ports only once (as that is reserved for all the
591                  * tunnels in the group).
592                  */
593                 group = tunnel->src_port->group;
594                 if (group && group->reserved && !group_reserved[group->index])
595                         group_reserved[group->index] = group->reserved;
596
597                 /*
598                  * Ignore the DP tunnel between src_port and dst_port
599                  * because it is the same tunnel and we may be
600                  * re-calculating estimated bandwidth.
601                  */
602                 if (tunnel->src_port == src_port &&
603                     tunnel->dst_port == dst_port)
604                         continue;
605
606                 ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
607                                                    &dp_consumed_down);
608                 if (ret)
609                         return ret;
610
611                 *consumed_up += dp_consumed_up;
612                 *consumed_down += dp_consumed_down;
613         }
614
615         downstream = tb_port_path_direction_downstream(src_port, dst_port);
616         for (i = 0; i < ARRAY_SIZE(group_reserved); i++) {
617                 if (downstream)
618                         *consumed_down += group_reserved[i];
619                 else
620                         *consumed_up += group_reserved[i];
621         }
622
623         return 0;
624 }
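
/*
 * For illustration (hypothetical numbers): if two DP tunnels of
 * bandwidth group 1 cross @port and the group has group->reserved =
 * 5000 Mb/s, the reservation is added only once on top of what the two
 * tunnels actually consume, in the direction of the path from
 * @src_port to @dst_port.
 */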
625
626 static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
627                               struct tb_port *port)
628 {
629         bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
630         enum tb_link_width width;
631
632         if (tb_is_upstream_port(port))
633                 width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
634         else
635                 width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;
636
637         return tb_port_width_supported(port, width);
638 }
639
640 /**
641  * tb_maximum_bandwidth() - Maximum bandwidth over a single link
642  * @tb: Domain structure
643  * @src_port: Source protocol adapter
644  * @dst_port: Destination protocol adapter
645  * @port: USB4 port the total bandwidth is calculated for
646  * @max_up: Maximum upstream bandwidth (Mb/s)
647  * @max_down: Maximum downstream bandwidth (Mb/s)
648  * @include_asym: Include bandwidth if the link is switched from
649  *                symmetric to asymmetric
650  *
651  * Returns maximum possible bandwidth in @max_up and @max_down over a
652  * single link at @port. If @include_asym is set then includes the
653  * additional bandwidth if the links are transitioned into asymmetric
654  * in the direction from @src_port to @dst_port.
655  */
656 static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
657                                 struct tb_port *dst_port, struct tb_port *port,
658                                 int *max_up, int *max_down, bool include_asym)
659 {
660         bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
661         int link_speed, link_width, up_bw, down_bw;
662
663         /*
664          * Can include asymmetric only if it is actually supported by
665          * the lane adapter.
666          */
667         if (!tb_asym_supported(src_port, dst_port, port))
668                 include_asym = false;
669
670         if (tb_is_upstream_port(port)) {
671                 link_speed = port->sw->link_speed;
672                 /*
673                  * sw->link_width is from upstream perspective so we use
674                  * the opposite for downstream of the host router.
675                  */
676                 if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
677                         up_bw = link_speed * 3 * 1000;
678                         down_bw = link_speed * 1 * 1000;
679                 } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
680                         up_bw = link_speed * 1 * 1000;
681                         down_bw = link_speed * 3 * 1000;
682                 } else if (include_asym) {
683                         /*
684                          * The link is symmetric at the moment but we
685                          * can switch it to asymmetric as needed. Report
686                          * this bandwidth as available (even though it
687                          * is not yet enabled).
688                          */
689                         if (downstream) {
690                                 up_bw = link_speed * 1 * 1000;
691                                 down_bw = link_speed * 3 * 1000;
692                         } else {
693                                 up_bw = link_speed * 3 * 1000;
694                                 down_bw = link_speed * 1 * 1000;
695                         }
696                 } else {
697                         up_bw = link_speed * port->sw->link_width * 1000;
698                         down_bw = up_bw;
699                 }
700         } else {
701                 link_speed = tb_port_get_link_speed(port);
702                 if (link_speed < 0)
703                         return link_speed;
704
705                 link_width = tb_port_get_link_width(port);
706                 if (link_width < 0)
707                         return link_width;
708
709                 if (link_width == TB_LINK_WIDTH_ASYM_TX) {
710                         up_bw = link_speed * 1 * 1000;
711                         down_bw = link_speed * 3 * 1000;
712                 } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
713                         up_bw = link_speed * 3 * 1000;
714                         down_bw = link_speed * 1 * 1000;
715                 } else if (include_asym) {
716                         /*
717                          * The link is symmetric at the moment but we
718                          * can switch it to asymmetric as needed. Report
719                          * this bandwidth as available (even though it
720                          * is not yet enabled).
721                          */
722                         if (downstream) {
723                                 up_bw = link_speed * 1 * 1000;
724                                 down_bw = link_speed * 3 * 1000;
725                         } else {
726                                 up_bw = link_speed * 3 * 1000;
727                                 down_bw = link_speed * 1 * 1000;
728                         }
729                 } else {
730                         up_bw = link_speed * link_width * 1000;
731                         down_bw = up_bw;
732                 }
733         }
734
735         /* Leave 10% guard band */
736         *max_up = up_bw - up_bw / 10;
737         *max_down = down_bw - down_bw / 10;
738
739         tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
740         return 0;
741 }
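
/*
 * For illustration, with a Gen 4 link (40 Gb/s per lane): a link
 * transitioned to asymmetric runs three lanes in one direction and one
 * in the other, i.e. 3 * 40 * 1000 = 120000 Mb/s versus 1 * 40 * 1000 =
 * 40000 Mb/s, reported as 108000/36000 Mb/s after the 10% guard band.
 * A symmetric dual-lane link gives 2 * 40 * 1000 = 80000 Mb/s per
 * direction, reported as 72000 Mb/s.
 */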
742
743 /**
744  * tb_available_bandwidth() - Available bandwidth for tunneling
745  * @tb: Domain structure
746  * @src_port: Source protocol adapter
747  * @dst_port: Destination protocol adapter
748  * @available_up: Available bandwidth upstream (Mb/s)
749  * @available_down: Available bandwidth downstream (Mb/s)
750  * @include_asym: Include bandwidth if the link is switched from
751  *                symmetric to asymmetric
752  *
753  * Calculates maximum available bandwidth for protocol tunneling between
754  * @src_port and @dst_port at the moment. This is minimum of maximum
755  * link bandwidth across all links reduced by currently consumed
756  * bandwidth on that link.
757  *
758  * If @include_asym is true then includes also bandwidth that can be
759  * added when the links are transitioned into asymmetric (but does not
760  * transition the links).
761  */
762 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
763                                  struct tb_port *dst_port, int *available_up,
764                                  int *available_down, bool include_asym)
765 {
766         struct tb_port *port;
767         int ret;
768
769         /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
770         *available_up = *available_down = 120000;
771
772         /* Find the minimum available bandwidth over all links */
773         tb_for_each_port_on_path(src_port, dst_port, port) {
774                 int max_up, max_down, consumed_up, consumed_down;
775
776                 if (!tb_port_is_null(port))
777                         continue;
778
779                 ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
780                                            &max_up, &max_down, include_asym);
781                 if (ret)
782                         return ret;
783
784                 ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
785                                                       port, &consumed_up,
786                                                       &consumed_down);
787                 if (ret)
788                         return ret;
789                 max_up -= consumed_up;
790                 max_down -= consumed_down;
791
792                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
793                                                &consumed_up, &consumed_down);
794                 if (ret)
795                         return ret;
796                 max_up -= consumed_up;
797                 max_down -= consumed_down;
798
799                 if (max_up < *available_up)
800                         *available_up = max_up;
801                 if (max_down < *available_down)
802                         *available_down = max_down;
803         }
804
805         if (*available_up < 0)
806                 *available_up = 0;
807         if (*available_down < 0)
808                 *available_down = 0;
809
810         return 0;
811 }
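
/*
 * In other words, available = min over every USB4 link on the path of
 * (link maximum - USB3/PCIe consumed - DP consumed), clamped to zero.
 * With hypothetical numbers: if a symmetric Gen 4 link reports 72000
 * Mb/s, USB3/PCIe reserve 10000 Mb/s and existing DP tunnels consume
 * 17000 Mb/s, that link contributes 45000 Mb/s, and the smallest such
 * value across the path is returned.
 */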
812
813 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
814                                             struct tb_port *src_port,
815                                             struct tb_port *dst_port)
816 {
817         struct tb_tunnel *tunnel;
818
819         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
820         return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
821 }
822
823 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
824                                       struct tb_port *dst_port)
825 {
826         int ret, available_up, available_down;
827         struct tb_tunnel *tunnel;
828
829         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
830         if (!tunnel)
831                 return;
832
833         tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n");
834
835         /*
836          * Calculate available bandwidth for the first hop USB3 tunnel.
837          * That determines the whole USB3 bandwidth for this branch.
838          */
839         ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
840                                      &available_up, &available_down, false);
841         if (ret) {
842                 tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n");
843                 return;
844         }
845
846         tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up,
847                       available_down);
848
849         tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
850 }
851
852 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
853 {
854         struct tb_switch *parent = tb_switch_parent(sw);
855         int ret, available_up, available_down;
856         struct tb_port *up, *down, *port;
857         struct tb_cm *tcm = tb_priv(tb);
858         struct tb_tunnel *tunnel;
859
860         if (!tb_acpi_may_tunnel_usb3()) {
861                 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
862                 return 0;
863         }
864
865         up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
866         if (!up)
867                 return 0;
868
869         if (!sw->link_usb4)
870                 return 0;
871
872         /*
873          * Look up the available down port. Since we are chaining, it should
874          * be found right above this switch.
875          */
876         port = tb_switch_downstream_port(sw);
877         down = tb_find_usb3_down(parent, port);
878         if (!down)
879                 return 0;
880
881         if (tb_route(parent)) {
882                 struct tb_port *parent_up;
883                 /*
884                  * Check first that the parent switch has its upstream USB3
885                  * port enabled. Otherwise the chain is not complete and
886                  * there is no point setting up a new tunnel.
887                  */
888                 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
889                 if (!parent_up || !tb_port_is_enabled(parent_up))
890                         return 0;
891
892                 /* Make all unused bandwidth available for the new tunnel */
893                 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
894                 if (ret)
895                         return ret;
896         }
897
898         ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
899                                      false);
900         if (ret)
901                 goto err_reclaim;
902
903         tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
904                     available_up, available_down);
905
906         tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
907                                       available_down);
908         if (!tunnel) {
909                 ret = -ENOMEM;
910                 goto err_reclaim;
911         }
912
913         if (tb_tunnel_activate(tunnel)) {
914                 tb_port_info(up,
915                              "USB3 tunnel activation failed, aborting\n");
916                 ret = -EIO;
917                 goto err_free;
918         }
919
920         list_add_tail(&tunnel->list, &tcm->tunnel_list);
921         if (tb_route(parent))
922                 tb_reclaim_usb3_bandwidth(tb, down, up);
923
924         return 0;
925
926 err_free:
927         tb_tunnel_free(tunnel);
928 err_reclaim:
929         if (tb_route(parent))
930                 tb_reclaim_usb3_bandwidth(tb, down, up);
931
932         return ret;
933 }
934
935 static int tb_create_usb3_tunnels(struct tb_switch *sw)
936 {
937         struct tb_port *port;
938         int ret;
939
940         if (!tb_acpi_may_tunnel_usb3())
941                 return 0;
942
943         if (tb_route(sw)) {
944                 ret = tb_tunnel_usb3(sw->tb, sw);
945                 if (ret)
946                         return ret;
947         }
948
949         tb_switch_for_each_port(sw, port) {
950                 if (!tb_port_has_remote(port))
951                         continue;
952                 ret = tb_create_usb3_tunnels(port->remote->sw);
953                 if (ret)
954                         return ret;
955         }
956
957         return 0;
958 }
959
960 /**
961  * tb_configure_asym() - Transition links to asymmetric if needed
962  * @tb: Domain structure
963  * @src_port: Source adapter to start the transition
964  * @dst_port: Destination adapter
965  * @requested_up: Additional bandwidth (Mb/s) required upstream
966  * @requested_down: Additional bandwidth (Mb/s) required downstream
967  *
968  * Transition links between @src_port and @dst_port into asymmetric, with
969  * three lanes in the direction from @src_port towards @dst_port and one lane
970  * in the opposite direction, if the bandwidth requirements
971  * (requested + currently consumed) on that link exceed @asym_threshold.
972  *
973  * Must be called with available >= requested over all links.
974  */
975 static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
976                              struct tb_port *dst_port, int requested_up,
977                              int requested_down)
978 {
979         bool clx = false, clx_disabled = false, downstream;
980         struct tb_switch *sw;
981         struct tb_port *up;
982         int ret = 0;
983
984         if (!asym_threshold)
985                 return 0;
986
987         downstream = tb_port_path_direction_downstream(src_port, dst_port);
988         /* Pick up router deepest in the hierarchy */
989         if (downstream)
990                 sw = dst_port->sw;
991         else
992                 sw = src_port->sw;
993
994         tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
995                 struct tb_port *down = tb_switch_downstream_port(up->sw);
996                 enum tb_link_width width_up, width_down;
997                 int consumed_up, consumed_down;
998
999                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1000                                                &consumed_up, &consumed_down);
1001                 if (ret)
1002                         break;
1003
1004                 if (downstream) {
1005                         /*
1006                          * Downstream so make sure upstream is within the 36G
1007                          * (40G - 10% guard band), and that the requested
1008                          * bandwidth is above the threshold.
1009                          */
1010                         if (consumed_up + requested_up >= TB_ASYM_MIN) {
1011                                 ret = -ENOBUFS;
1012                                 break;
1013                         }
1014                         /* Does consumed + requested exceed the threshold? */
1015                         if (consumed_down + requested_down < asym_threshold)
1016                                 continue;
1017
1018                         width_up = TB_LINK_WIDTH_ASYM_RX;
1019                         width_down = TB_LINK_WIDTH_ASYM_TX;
1020                 } else {
1021                         /* Upstream, the opposite of above */
1022                         if (consumed_down + requested_down >= TB_ASYM_MIN) {
1023                                 ret = -ENOBUFS;
1024                                 break;
1025                         }
1026                         if (consumed_up + requested_up < asym_threshold)
1027                                 continue;
1028
1029                         width_up = TB_LINK_WIDTH_ASYM_TX;
1030                         width_down = TB_LINK_WIDTH_ASYM_RX;
1031                 }
1032
1033                 if (up->sw->link_width == width_up)
1034                         continue;
1035
1036                 if (!tb_port_width_supported(up, width_up) ||
1037                     !tb_port_width_supported(down, width_down))
1038                         continue;
1039
1040                 /*
1041                  * Disable CL states before doing any transitions. We
1042                  * delayed it until now that we know there is a real
1043                  * transition taking place.
1044                  */
1045                 if (!clx_disabled) {
1046                         clx = tb_disable_clx(sw);
1047                         clx_disabled = true;
1048                 }
1049
1050                 tb_sw_dbg(up->sw, "configuring asymmetric link\n");
1051
1052                 /*
1053                  * Here requested + consumed > threshold so we need to
1054                  * transition the link into asymmetric now.
1055                  */
1056                 ret = tb_switch_set_link_width(up->sw, width_up);
1057                 if (ret) {
1058                         tb_sw_warn(up->sw, "failed to set link width\n");
1059                         break;
1060                 }
1061         }
1062
1063         /* Re-enable CL states if they were previously enabled */
1064         if (clx)
1065                 tb_enable_clx(sw);
1066
1067         return ret;
1068 }
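
/*
 * Worked example with the default threshold (hypothetical numbers), in
 * the downstream direction: consumed_down = 30000 Mb/s and
 * requested_down = 20000 Mb/s gives 50000 >= 45000, so the link is
 * transitioned to asymmetric (three lanes downstream), provided
 * consumed_up + requested_up stays below TB_ASYM_MIN (36000 Mb/s) and
 * both lane adapters support the asymmetric widths.
 */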
1069
1070 /**
1071  * tb_configure_sym() - Transition links to symmetric if possible
1072  * @tb: Domain structure
1073  * @src_port: Source adapter to start the transition
1074  * @dst_port: Destination adapter
1075  * @keep_asym: Keep asymmetric link if preferred
1076  *
1077  * Goes over each link from @src_port to @dst_port and tries to
1078  * transition the link to symmetric if the currently consumed bandwidth
1079  * allows. Asymmetric link preference is honored only if @keep_asym is %true.
1080  */
1081 static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
1082                             struct tb_port *dst_port, bool keep_asym)
1083 {
1084         bool clx = false, clx_disabled = false, downstream;
1085         struct tb_switch *sw;
1086         struct tb_port *up;
1087         int ret = 0;
1088
1089         if (!asym_threshold)
1090                 return 0;
1091
1092         downstream = tb_port_path_direction_downstream(src_port, dst_port);
1093         /* Pick up router deepest in the hierarchy */
1094         if (downstream)
1095                 sw = dst_port->sw;
1096         else
1097                 sw = src_port->sw;
1098
1099         tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
1100                 int consumed_up, consumed_down;
1101
1102                 /* Already symmetric */
1103                 if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
1104                         continue;
1105                 /* Unplugged, no need to switch */
1106                 if (up->sw->is_unplugged)
1107                         continue;
1108
1109                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1110                                                &consumed_up, &consumed_down);
1111                 if (ret)
1112                         break;
1113
1114                 if (downstream) {
1115                         /*
1116                          * Downstream so we want the consumed_down < threshold.
1117                          * Upstream traffic should be less than 36G (40G -
1118                          * 10% guard band) as the link was configured asymmetric
1119                          * already.
1120                          */
1121                         if (consumed_down >= asym_threshold)
1122                                 continue;
1123                 } else {
1124                         if (consumed_up >= asym_threshold)
1125                                 continue;
1126                 }
1127
1128                 if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
1129                         continue;
1130
1131                 /*
1132                  * Here consumed < threshold so we can transition the
1133                  * link to symmetric.
1134                  *
1135                  * However, if the router prefers asymmetric link we
1136                  * honor that (unless @keep_asym is %false).
1137                  */
1138                 if (keep_asym &&
1139                     up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) {
1140                         tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n");
1141                         continue;
1142                 }
1143
1144                 /* Disable CL states before doing any transitions */
1145                 if (!clx_disabled) {
1146                         clx = tb_disable_clx(sw);
1147                         clx_disabled = true;
1148                 }
1149
1150                 tb_sw_dbg(up->sw, "configuring symmetric link\n");
1151
1152                 ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
1153                 if (ret) {
1154                         tb_sw_warn(up->sw, "failed to set link width\n");
1155                         break;
1156                 }
1157         }
1158
1159         /* Re-enable CL states if they were previously enabled */
1160         if (clx)
1161                 tb_enable_clx(sw);
1162
1163         return ret;
1164 }
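
/*
 * Continuing the example above (hypothetical numbers): once the DP
 * tunnel that pushed consumption to 50000 Mb/s is torn down and
 * consumed_down drops to 30000 Mb/s, 30000 < 45000 so the link is
 * transitioned back to symmetric, unless the router prefers an
 * asymmetric link and @keep_asym is true.
 */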
1165
1166 static void tb_configure_link(struct tb_port *down, struct tb_port *up,
1167                               struct tb_switch *sw)
1168 {
1169         struct tb *tb = sw->tb;
1170
1171         /* Link the routers using both links if available */
1172         down->remote = up;
1173         up->remote = down;
1174         if (down->dual_link_port && up->dual_link_port) {
1175                 down->dual_link_port->remote = up->dual_link_port;
1176                 up->dual_link_port->remote = down->dual_link_port;
1177         }
1178
1179         /*
1180          * Enable lane bonding if the link is currently two single lane
1181          * links.
1182          */
1183         if (sw->link_width < TB_LINK_WIDTH_DUAL)
1184                 tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
1185
1186         /*
1187          * If a device router that comes up with a symmetric link is
1188          * connected deeper in the hierarchy, transition the links
1189          * above it into symmetric if bandwidth allows.
1190          */
1191         if (tb_switch_depth(sw) > 1 &&
1192             tb_port_get_link_generation(up) >= 4 &&
1193             up->sw->link_width == TB_LINK_WIDTH_DUAL) {
1194                 struct tb_port *host_port;
1195
1196                 host_port = tb_port_at(tb_route(sw), tb->root_switch);
1197                 tb_configure_sym(tb, host_port, up, false);
1198         }
1199
1200         /* Set the link configured */
1201         tb_switch_configure_link(sw);
1202 }
1203
1204 static void tb_scan_port(struct tb_port *port);
1205
1206 /*
1207  * tb_scan_switch() - scan for and initialize downstream switches
1208  */
1209 static void tb_scan_switch(struct tb_switch *sw)
1210 {
1211         struct tb_port *port;
1212
1213         pm_runtime_get_sync(&sw->dev);
1214
1215         tb_switch_for_each_port(sw, port)
1216                 tb_scan_port(port);
1217
1218         pm_runtime_mark_last_busy(&sw->dev);
1219         pm_runtime_put_autosuspend(&sw->dev);
1220 }
1221
1222 /*
1223  * tb_scan_port() - check for and initialize switches below port
1224  */
1225 static void tb_scan_port(struct tb_port *port)
1226 {
1227         struct tb_cm *tcm = tb_priv(port->sw->tb);
1228         struct tb_port *upstream_port;
1229         bool discovery = false;
1230         struct tb_switch *sw;
1231
1232         if (tb_is_upstream_port(port))
1233                 return;
1234
1235         if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
1236             !tb_dp_port_is_enabled(port)) {
1237                 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
1238                 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
1239                                  false);
1240                 return;
1241         }
1242
1243         if (port->config.type != TB_TYPE_PORT)
1244                 return;
1245         if (port->dual_link_port && port->link_nr)
1246                 return; /*
1247                          * Downstream switch is reachable through two ports.
1248                          * Only scan on the primary port (link_nr == 0).
1249                          */
1250
1251         if (port->usb4)
1252                 pm_runtime_get_sync(&port->usb4->dev);
1253
1254         if (tb_wait_for_port(port, false) <= 0)
1255                 goto out_rpm_put;
1256         if (port->remote) {
1257                 tb_port_dbg(port, "port already has a remote\n");
1258                 goto out_rpm_put;
1259         }
1260
1261         tb_retimer_scan(port, true);
1262
1263         sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
1264                              tb_downstream_route(port));
1265         if (IS_ERR(sw)) {
1266                 /*
1267                  * If there is an error accessing the connected switch
1268                  * it may be connected to another domain. Also we allow
1269                  * the other domain to be connected to a max depth switch.
1270                  */
1271                 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
1272                         tb_scan_xdomain(port);
1273                 goto out_rpm_put;
1274         }
1275
1276         if (tb_switch_configure(sw)) {
1277                 tb_switch_put(sw);
1278                 goto out_rpm_put;
1279         }
1280
1281         /*
1282          * If there was previously another domain connected remove it
1283          * first.
1284          */
1285         if (port->xdomain) {
1286                 tb_xdomain_remove(port->xdomain);
1287                 tb_port_unconfigure_xdomain(port);
1288                 port->xdomain = NULL;
1289         }
1290
1291         /*
1292          * Do not send uevents until we have discovered all existing
1293          * tunnels and know which switches were authorized already by
1294          * the boot firmware.
1295          */
1296         if (!tcm->hotplug_active) {
1297                 dev_set_uevent_suppress(&sw->dev, true);
1298                 discovery = true;
1299         }
1300
1301         /*
1302          * At the moment runtime PM is supported only for Thunderbolt 2
1303          * and beyond (devices with LC).
1304          */
1305         sw->rpm = sw->generation > 1;
1306
1307         if (tb_switch_add(sw)) {
1308                 tb_switch_put(sw);
1309                 goto out_rpm_put;
1310         }
1311
1312         upstream_port = tb_upstream_port(sw);
1313         tb_configure_link(port, upstream_port, sw);
1314
1315         /*
1316          * CL0s and CL1 are enabled and supported together.
1317          * Silently ignore CLx enabling in case CLx is not supported.
1318          */
1319         if (discovery)
1320                 tb_sw_dbg(sw, "discovery, not touching CL states\n");
1321         else if (tb_enable_clx(sw))
1322                 tb_sw_warn(sw, "failed to enable CL states\n");
1323
1324         if (tb_enable_tmu(sw))
1325                 tb_sw_warn(sw, "failed to enable TMU\n");
1326
1327         /*
1328          * Configuration valid needs to be set after the TMU has been
1329          * enabled for the upstream port of the router so we do it here.
1330          */
1331         tb_switch_configuration_valid(sw);
1332
1333         /* Scan upstream retimers */
1334         tb_retimer_scan(upstream_port, true);
1335
1336         /*
1337          * Create USB 3.x tunnels only when the switch is plugged into the
1338          * domain. This is because we also scan the domain during discovery
1339          * and want to discover existing USB 3.x tunnels before we create
1340          * any new ones.
1341          */
1342         if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
1343                 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
1344
1345         tb_add_dp_resources(sw);
1346         tb_scan_switch(sw);
1347
1348 out_rpm_put:
1349         if (port->usb4) {
1350                 pm_runtime_mark_last_busy(&port->usb4->dev);
1351                 pm_runtime_put_autosuspend(&port->usb4->dev);
1352         }
1353 }
1354
1355 static void
1356 tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
1357 {
1358         struct tb_tunnel *first_tunnel;
1359         struct tb *tb = group->tb;
1360         struct tb_port *in;
1361         int ret;
1362
1363         tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
1364                group->index);
1365
1366         first_tunnel = NULL;
1367         list_for_each_entry(in, &group->ports, group_list) {
1368                 int estimated_bw, estimated_up, estimated_down;
1369                 struct tb_tunnel *tunnel;
1370                 struct tb_port *out;
1371
1372                 if (!usb4_dp_port_bandwidth_mode_enabled(in))
1373                         continue;
1374
1375                 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1376                 if (WARN_ON(!tunnel))
1377                         break;
1378
1379                 if (!first_tunnel) {
1380                         /*
1381                          * Since USB3 bandwidth is shared by all DP
1382                          * tunnels under the host router USB4 port, even
1383                          * if they do not begin from the host router, we
1384                          * can release USB3 bandwidth just once and not
1385                          * for each tunnel separately.
1386                          */
1387                         first_tunnel = tunnel;
1388                         ret = tb_release_unused_usb3_bandwidth(tb,
1389                                 first_tunnel->src_port, first_tunnel->dst_port);
1390                         if (ret) {
1391                                 tb_tunnel_warn(tunnel,
1392                                         "failed to release unused bandwidth\n");
1393                                 break;
1394                         }
1395                 }
1396
1397                 out = tunnel->dst_port;
1398                 ret = tb_available_bandwidth(tb, in, out, &estimated_up,
1399                                              &estimated_down, true);
1400                 if (ret) {
1401                         tb_tunnel_warn(tunnel,
1402                                 "failed to re-calculate estimated bandwidth\n");
1403                         break;
1404                 }
1405
1406                 /*
1407                  * Estimated bandwidth includes:
1408                  *  - already allocated bandwidth for the DP tunnel
1409                  *  - available bandwidth along the path
1410                  *  - bandwidth allocated for USB 3.x but not used.
1411                  */
1412                 if (tb_tunnel_direction_downstream(tunnel))
1413                         estimated_bw = estimated_down;
1414                 else
1415                         estimated_bw = estimated_up;
1416
1417                 /*
1418                  * If there is reserved bandwidth for the group that is
1419                  * not yet released we report that too.
1420                  */
1421                 tb_tunnel_dbg(tunnel,
1422                               "re-calculated estimated bandwidth %u (+ %u reserved) = %u Mb/s\n",
1423                               estimated_bw, group->reserved,
1424                               estimated_bw + group->reserved);
1425
1426                 if (usb4_dp_port_set_estimated_bandwidth(in,
1427                                 estimated_bw + group->reserved))
1428                         tb_tunnel_warn(tunnel,
1429                                        "failed to update estimated bandwidth\n");
1430         }
1431
1432         if (first_tunnel)
1433                 tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
1434                                           first_tunnel->dst_port);
1435
1436         tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
1437 }
1438
1439 static void tb_recalc_estimated_bandwidth(struct tb *tb)
1440 {
1441         struct tb_cm *tcm = tb_priv(tb);
1442         int i;
1443
1444         tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
1445
1446         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1447                 struct tb_bandwidth_group *group = &tcm->groups[i];
1448
1449                 if (!list_empty(&group->ports))
1450                         tb_recalc_estimated_bandwidth_for_group(group);
1451         }
1452
1453         tb_dbg(tb, "bandwidth re-calculation done\n");
1454 }
1455
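/*
 * __release_group_bandwidth() - release the reserved bandwidth of a group
 *
 * Returns true if there was reserved bandwidth to release.
 */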
1456 static bool __release_group_bandwidth(struct tb_bandwidth_group *group)
1457 {
1458         if (group->reserved) {
1459                 tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index,
1460                         group->reserved);
1461                 group->reserved = 0;
1462                 return true;
1463         }
1464         return false;
1465 }
1466
1467 static void __configure_group_sym(struct tb_bandwidth_group *group)
1468 {
1469         struct tb_tunnel *tunnel;
1470         struct tb_port *in;
1471
1472         if (list_empty(&group->ports))
1473                 return;
1474
1475         /*
1476          * All the tunnels in the group go through the same USB4 links
1477          * so we find the first one here and pass the IN and OUT
1478          * adapters to tb_configure_sym(), which then transitions the
1479          * links back to symmetric if the bandwidth requirement is < asym_threshold.
1480          *
1481          * We do this here to avoid unnecessary transitions (for example
1482          * if the graphics driver released bandwidth for another tunnel in the
1483          * same group).
1484          */
1485         in = list_first_entry(&group->ports, struct tb_port, group_list);
1486         tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL);
1487         if (tunnel)
1488                 tb_configure_sym(group->tb, in, tunnel->dst_port, true);
1489 }
1490
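/*
 * tb_bandwidth_group_release_work() - delayed work that gives the reserved
 * bandwidth of a group back for others to use and transitions the group's
 * links back to symmetric if possible
 */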
1491 static void tb_bandwidth_group_release_work(struct work_struct *work)
1492 {
1493         struct tb_bandwidth_group *group =
1494                 container_of(work, typeof(*group), release_work.work);
1495         struct tb *tb = group->tb;
1496
1497         mutex_lock(&tb->lock);
1498         if (__release_group_bandwidth(group))
1499                 tb_recalc_estimated_bandwidth(tb);
1500         __configure_group_sym(group);
1501         mutex_unlock(&tb->lock);
1502 }
1503
1504 static void tb_init_bandwidth_groups(struct tb_cm *tcm)
1505 {
1506         int i;
1507
1508         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1509                 struct tb_bandwidth_group *group = &tcm->groups[i];
1510
1511                 group->tb = tcm_to_tb(tcm);
1512                 group->index = i + 1;
1513                 INIT_LIST_HEAD(&group->ports);
1514                 INIT_DELAYED_WORK(&group->release_work,
1515                                   tb_bandwidth_group_release_work);
1516         }
1517 }
1518
1519 static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
1520                                            struct tb_port *in)
1521 {
1522         if (!group || WARN_ON(in->group))
1523                 return;
1524
1525         in->group = group;
1526         list_add_tail(&in->group_list, &group->ports);
1527
1528         tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
1529 }
1530
1531 static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
1532 {
1533         int i;
1534
1535         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1536                 struct tb_bandwidth_group *group = &tcm->groups[i];
1537
1538                 if (list_empty(&group->ports))
1539                         return group;
1540         }
1541
1542         return NULL;
1543 }
1544
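/*
 * tb_attach_bandwidth_group() - attach a DP IN adapter to a bandwidth group
 *
 * DP tunnels going through the same USB4 links share a group. If no
 * existing group matches, the first free group is used instead. Returns the
 * group or NULL if all groups are already in use.
 */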
1545 static struct tb_bandwidth_group *
1546 tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
1547                           struct tb_port *out)
1548 {
1549         struct tb_bandwidth_group *group;
1550         struct tb_tunnel *tunnel;
1551
1552         /*
1553          * Find all DP tunnels that go through all the same USB4 links
1554          * as this one. Because we always set up tunnels the same way, we
1555          * can just check the routers at both ends of the tunnels,
1556          * and if they are the same we have a match.
1557          */
1558         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1559                 if (!tb_tunnel_is_dp(tunnel))
1560                         continue;
1561
1562                 if (tunnel->src_port->sw == in->sw &&
1563                     tunnel->dst_port->sw == out->sw) {
1564                         group = tunnel->src_port->group;
1565                         if (group) {
1566                                 tb_bandwidth_group_attach_port(group, in);
1567                                 return group;
1568                         }
1569                 }
1570         }
1571
1572         /* Otherwise pick the next available group */
1573         group = tb_find_free_bandwidth_group(tcm);
1574         if (group)
1575                 tb_bandwidth_group_attach_port(group, in);
1576         else
1577                 tb_port_warn(in, "no available bandwidth groups\n");
1578
1579         return group;
1580 }
1581
1582 static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
1583                                         struct tb_port *out)
1584 {
1585         if (usb4_dp_port_bandwidth_mode_enabled(in)) {
1586                 int index, i;
1587
1588                 index = usb4_dp_port_group_id(in);
1589                 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1590                         if (tcm->groups[i].index == index) {
1591                                 tb_bandwidth_group_attach_port(&tcm->groups[i], in);
1592                                 return;
1593                         }
1594                 }
1595         }
1596
1597         tb_attach_bandwidth_group(tcm, in, out);
1598 }
1599
1600 static void tb_detach_bandwidth_group(struct tb_port *in)
1601 {
1602         struct tb_bandwidth_group *group = in->group;
1603
1604         if (group) {
1605                 in->group = NULL;
1606                 list_del_init(&in->group_list);
1607
1608                 tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
1609
1610                 /* No more tunnels so release the reserved bandwidth if any */
1611                 if (list_empty(&group->ports)) {
1612                         cancel_delayed_work(&group->release_work);
1613                         __release_group_bandwidth(group);
1614                 }
1615         }
1616 }
1617
1618 static void tb_discover_tunnels(struct tb *tb)
1619 {
1620         struct tb_cm *tcm = tb_priv(tb);
1621         struct tb_tunnel *tunnel;
1622
1623         tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
1624
1625         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1626                 if (tb_tunnel_is_pci(tunnel)) {
1627                         struct tb_switch *parent = tunnel->dst_port->sw;
1628
1629                         while (parent != tunnel->src_port->sw) {
1630                                 parent->boot = true;
1631                                 parent = tb_switch_parent(parent);
1632                         }
1633                 } else if (tb_tunnel_is_dp(tunnel)) {
1634                         struct tb_port *in = tunnel->src_port;
1635                         struct tb_port *out = tunnel->dst_port;
1636
1637                         /* Keep the domain from powering down */
1638                         pm_runtime_get_sync(&in->sw->dev);
1639                         pm_runtime_get_sync(&out->sw->dev);
1640
1641                         tb_discover_bandwidth_group(tcm, in, out);
1642                 }
1643         }
1644 }
1645
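/*
 * tb_deactivate_and_free_tunnel() - deactivate a tunnel, release any
 * resources it consumed (such as the DP IN adapter, bandwidth group and
 * USB3 bandwidth) and free it
 */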
1646 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
1647 {
1648         struct tb_port *src_port, *dst_port;
1649         struct tb *tb;
1650
1651         if (!tunnel)
1652                 return;
1653
1654         tb_tunnel_deactivate(tunnel);
1655         list_del(&tunnel->list);
1656
1657         tb = tunnel->tb;
1658         src_port = tunnel->src_port;
1659         dst_port = tunnel->dst_port;
1660
1661         switch (tunnel->type) {
1662         case TB_TUNNEL_DP:
1663                 tb_detach_bandwidth_group(src_port);
1664                 /*
1665                  * In case of a DP tunnel, make sure the DP IN resource is
1666                  * deallocated properly.
1667                  */
1668                 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
1669                 /*
1670                  * If the bandwidth on a link is < asym_threshold,
1671                  * transition the link to symmetric.
1672                  */
1673                 tb_configure_sym(tb, src_port, dst_port, true);
1674                 /* Now we can allow the domain to runtime suspend again */
1675                 pm_runtime_mark_last_busy(&dst_port->sw->dev);
1676                 pm_runtime_put_autosuspend(&dst_port->sw->dev);
1677                 pm_runtime_mark_last_busy(&src_port->sw->dev);
1678                 pm_runtime_put_autosuspend(&src_port->sw->dev);
1679                 fallthrough;
1680
1681         case TB_TUNNEL_USB3:
1682                 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
1683                 break;
1684
1685         default:
1686                 /*
1687                  * PCIe and DMA tunnels do not consume guaranteed
1688                  * bandwidth.
1689                  */
1690                 break;
1691         }
1692
1693         tb_tunnel_free(tunnel);
1694 }
1695
1696 /*
1697  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
1698  */
1699 static void tb_free_invalid_tunnels(struct tb *tb)
1700 {
1701         struct tb_cm *tcm = tb_priv(tb);
1702         struct tb_tunnel *tunnel;
1703         struct tb_tunnel *n;
1704
1705         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1706                 if (tb_tunnel_is_invalid(tunnel))
1707                         tb_deactivate_and_free_tunnel(tunnel);
1708         }
1709 }
1710
1711 /*
1712  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
1713  */
1714 static void tb_free_unplugged_children(struct tb_switch *sw)
1715 {
1716         struct tb_port *port;
1717
1718         tb_switch_for_each_port(sw, port) {
1719                 if (!tb_port_has_remote(port))
1720                         continue;
1721
1722                 if (port->remote->sw->is_unplugged) {
1723                         tb_retimer_remove_all(port);
1724                         tb_remove_dp_resources(port->remote->sw);
1725                         tb_switch_unconfigure_link(port->remote->sw);
1726                         tb_switch_set_link_width(port->remote->sw,
1727                                                  TB_LINK_WIDTH_SINGLE);
1728                         tb_switch_remove(port->remote->sw);
1729                         port->remote = NULL;
1730                         if (port->dual_link_port)
1731                                 port->dual_link_port->remote = NULL;
1732                 } else {
1733                         tb_free_unplugged_children(port->remote->sw);
1734                 }
1735         }
1736 }
1737
1738 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
1739                                          const struct tb_port *port)
1740 {
1741         struct tb_port *down = NULL;
1742
1743         /*
1744          * To keep plugged-in devices consistently in the same PCIe
1745          * hierarchy, map the switch downstream PCIe ports here.
1746          */
1747         if (tb_switch_is_usb4(sw)) {
1748                 down = usb4_switch_map_pcie_down(sw, port);
1749         } else if (!tb_route(sw)) {
1750                 int phy_port = tb_phy_port_from_link(port->port);
1751                 int index;
1752
1753                 /*
1754                  * Hard-coded Thunderbolt port to PCIe down port mapping
1755                  * per controller.
1756                  */
1757                 if (tb_switch_is_cactus_ridge(sw) ||
1758                     tb_switch_is_alpine_ridge(sw))
1759                         index = !phy_port ? 6 : 7;
1760                 else if (tb_switch_is_falcon_ridge(sw))
1761                         index = !phy_port ? 6 : 8;
1762                 else if (tb_switch_is_titan_ridge(sw))
1763                         index = !phy_port ? 8 : 9;
1764                 else
1765                         goto out;
1766
1767                 /* Validate the hard-coding */
1768                 if (WARN_ON(index > sw->config.max_port_number))
1769                         goto out;
1770
1771                 down = &sw->ports[index];
1772         }
1773
1774         if (down) {
1775                 if (WARN_ON(!tb_port_is_pcie_down(down)))
1776                         goto out;
1777                 if (tb_pci_port_is_enabled(down))
1778                         goto out;
1779
1780                 return down;
1781         }
1782
1783 out:
1784         return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
1785 }
1786
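/*
 * tb_find_dp_out() - find an unused DP OUT adapter on another router that
 * can be paired with the given DP IN adapter
 */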
1787 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1788 {
1789         struct tb_port *host_port, *port;
1790         struct tb_cm *tcm = tb_priv(tb);
1791
1792         host_port = tb_route(in->sw) ?
1793                 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1794
1795         list_for_each_entry(port, &tcm->dp_resources, list) {
1796                 if (!tb_port_is_dpout(port))
1797                         continue;
1798
1799                 if (tb_port_is_enabled(port)) {
1800                         tb_port_dbg(port, "DP OUT in use\n");
1801                         continue;
1802                 }
1803
1804                 /* Needs to be on different routers */
1805                 if (in->sw == port->sw) {
1806                         tb_port_dbg(port, "skipping DP OUT on same router\n");
1807                         continue;
1808                 }
1809
1810                 tb_port_dbg(port, "DP OUT available\n");
1811
1812                 /*
1813                  * Keep the DP tunnel under the topology starting from
1814                  * the same host router downstream port.
1815                  */
1816                 if (host_port && tb_route(port->sw)) {
1817                         struct tb_port *p;
1818
1819                         p = tb_port_at(tb_route(port->sw), tb->root_switch);
1820                         if (p != host_port)
1821                                 continue;
1822                 }
1823
1824                 return port;
1825         }
1826
1827         return NULL;
1828 }
1829
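/*
 * tb_tunnel_one_dp() - establish a DP tunnel between the given DP IN and
 * DP OUT adapters
 *
 * Returns true if the tunnel was established.
 */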
1830 static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
1831                              struct tb_port *out)
1832 {
1833         int available_up, available_down, ret, link_nr;
1834         struct tb_cm *tcm = tb_priv(tb);
1835         int consumed_up, consumed_down;
1836         struct tb_tunnel *tunnel;
1837
1838         /*
1839          * This is only applicable to links that are not bonded (so
1840          * when Thunderbolt 1 hardware is involved somewhere in the
1841          * topology). For these try to share the DP bandwidth between
1842          * the two lanes.
1843          */
1844         link_nr = 1;
1845         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1846                 if (tb_tunnel_is_dp(tunnel)) {
1847                         link_nr = 0;
1848                         break;
1849                 }
1850         }
1851
1852         /*
1853          * DP stream needs the domain to be active so runtime resume
1854          * both ends of the tunnel.
1855          *
1856          * This should make the routers in the middle active as well
1857          * and keep the domain from runtime suspending while the DP
1858          * tunnel is active.
1859          */
1860         pm_runtime_get_sync(&in->sw->dev);
1861         pm_runtime_get_sync(&out->sw->dev);
1862
1863         if (tb_switch_alloc_dp_resource(in->sw, in)) {
1864                 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
1865                 goto err_rpm_put;
1866         }
1867
1868         if (!tb_attach_bandwidth_group(tcm, in, out))
1869                 goto err_dealloc_dp;
1870
1871         /* Make all unused USB3 bandwidth available for the new DP tunnel */
1872         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1873         if (ret) {
1874                 tb_warn(tb, "failed to release unused bandwidth\n");
1875                 goto err_detach_group;
1876         }
1877
1878         ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
1879                                      true);
1880         if (ret)
1881                 goto err_reclaim_usb;
1882
1883         tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
1884                available_up, available_down);
1885
1886         tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
1887                                     available_down);
1888         if (!tunnel) {
1889                 tb_port_dbg(out, "could not allocate DP tunnel\n");
1890                 goto err_reclaim_usb;
1891         }
1892
1893         if (tb_tunnel_activate(tunnel)) {
1894                 tb_port_info(out, "DP tunnel activation failed, aborting\n");
1895                 goto err_free;
1896         }
1897
1898         /* If reading the tunnel's consumed bandwidth fails, tear it down */
1899         ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
1900         if (ret)
1901                 goto err_deactivate;
1902
1903         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1904
1905         tb_reclaim_usb3_bandwidth(tb, in, out);
1906         /*
1907          * Transition the links to asymmetric if the consumption exceeds
1908          * the threshold.
1909          */
1910         tb_configure_asym(tb, in, out, consumed_up, consumed_down);
1911
1912         /* Update the domain with the new bandwidth estimation */
1913         tb_recalc_estimated_bandwidth(tb);
1914
1915         /*
1916          * If a DP tunnel exists, change the TMU mode of the host
1917          * router's first-level children to HiFi so that CL0s works.
1918          */
1919         tb_increase_tmu_accuracy(tunnel);
1920         return true;
1921
1922 err_deactivate:
1923         tb_tunnel_deactivate(tunnel);
1924 err_free:
1925         tb_tunnel_free(tunnel);
1926 err_reclaim_usb:
1927         tb_reclaim_usb3_bandwidth(tb, in, out);
1928 err_detach_group:
1929         tb_detach_bandwidth_group(in);
1930 err_dealloc_dp:
1931         tb_switch_dealloc_dp_resource(in->sw, in);
1932 err_rpm_put:
1933         pm_runtime_mark_last_busy(&out->sw->dev);
1934         pm_runtime_put_autosuspend(&out->sw->dev);
1935         pm_runtime_mark_last_busy(&in->sw->dev);
1936         pm_runtime_put_autosuspend(&in->sw->dev);
1937
1938         return false;
1939 }
1940
1941 static void tb_tunnel_dp(struct tb *tb)
1942 {
1943         struct tb_cm *tcm = tb_priv(tb);
1944         struct tb_port *port, *in, *out;
1945
1946         if (!tb_acpi_may_tunnel_dp()) {
1947                 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1948                 return;
1949         }
1950
1951         /*
1952          * Find a pair of inactive DP IN and DP OUT adapters and then
1953          * establish a DP tunnel between them.
1954          */
1955         tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
1956
1957         in = NULL;
1958         out = NULL;
1959         list_for_each_entry(port, &tcm->dp_resources, list) {
1960                 if (!tb_port_is_dpin(port))
1961                         continue;
1962
1963                 if (tb_port_is_enabled(port)) {
1964                         tb_port_dbg(port, "DP IN in use\n");
1965                         continue;
1966                 }
1967
1968                 in = port;
1969                 tb_port_dbg(in, "DP IN available\n");
1970
1971                 out = tb_find_dp_out(tb, port);
1972                 if (out)
1973                         tb_tunnel_one_dp(tb, in, out);
1974                 else
1975                         tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
1976         }
1977
1978         if (!in)
1979                 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
1980 }
1981
1982 static void tb_enter_redrive(struct tb_port *port)
1983 {
1984         struct tb_switch *sw = port->sw;
1985
1986         if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
1987                 return;
1988
1989         /*
1990          * If we get hot-unplug for the DP IN port of the host router
1991          * and the DP resource is not available anymore it means there
1992          * is a monitor connected directly to the Type-C port and we are
1993          * in "redrive" mode. For this to work we cannot enter RTD3 so
1994          * we bump up the runtime PM reference count here.
1995          */
1996         if (!tb_port_is_dpin(port))
1997                 return;
1998         if (tb_route(sw))
1999                 return;
2000         if (!tb_switch_query_dp_resource(sw, port)) {
2001                 port->redrive = true;
2002                 pm_runtime_get(&sw->dev);
2003                 tb_port_dbg(port, "enter redrive mode, keeping powered\n");
2004         }
2005 }
2006
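/*
 * tb_exit_redrive() - counterpart of tb_enter_redrive(): drop the runtime
 * PM reference once the DP IN resource of the host router is available
 * again
 */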
2007 static void tb_exit_redrive(struct tb_port *port)
2008 {
2009         struct tb_switch *sw = port->sw;
2010
2011         if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
2012                 return;
2013
2014         if (!tb_port_is_dpin(port))
2015                 return;
2016         if (tb_route(sw))
2017                 return;
2018         if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
2019                 port->redrive = false;
2020                 pm_runtime_put(&sw->dev);
2021                 tb_port_dbg(port, "exit redrive mode\n");
2022         }
2023 }
2024
2025 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
2026 {
2027         struct tb_port *in, *out;
2028         struct tb_tunnel *tunnel;
2029
2030         if (tb_port_is_dpin(port)) {
2031                 tb_port_dbg(port, "DP IN resource unavailable\n");
2032                 in = port;
2033                 out = NULL;
2034         } else {
2035                 tb_port_dbg(port, "DP OUT resource unavailable\n");
2036                 in = NULL;
2037                 out = port;
2038         }
2039
2040         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
2041         if (tunnel)
2042                 tb_deactivate_and_free_tunnel(tunnel);
2043         else
2044                 tb_enter_redrive(port);
2045         list_del_init(&port->list);
2046
2047         /*
2048          * See if there is another DP OUT port that can be used to
2049          * create another tunnel.
2050          */
2051         tb_recalc_estimated_bandwidth(tb);
2052         tb_tunnel_dp(tb);
2053 }
2054
2055 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
2056 {
2057         struct tb_cm *tcm = tb_priv(tb);
2058         struct tb_port *p;
2059
2060         if (tb_port_is_enabled(port))
2061                 return;
2062
2063         list_for_each_entry(p, &tcm->dp_resources, list) {
2064                 if (p == port)
2065                         return;
2066         }
2067
2068         tb_port_dbg(port, "DP %s resource available after hotplug\n",
2069                     tb_port_is_dpin(port) ? "IN" : "OUT");
2070         list_add_tail(&port->list, &tcm->dp_resources);
2071         tb_exit_redrive(port);
2072
2073         /* Look for suitable DP IN <-> DP OUT pairs now */
2074         tb_tunnel_dp(tb);
2075 }
2076
2077 static void tb_disconnect_and_release_dp(struct tb *tb)
2078 {
2079         struct tb_cm *tcm = tb_priv(tb);
2080         struct tb_tunnel *tunnel, *n;
2081
2082         /*
2083          * Tear down all DP tunnels and release their resources. They
2084          * will be re-established after resume based on plug events.
2085          */
2086         list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
2087                 if (tb_tunnel_is_dp(tunnel))
2088                         tb_deactivate_and_free_tunnel(tunnel);
2089         }
2090
2091         while (!list_empty(&tcm->dp_resources)) {
2092                 struct tb_port *port;
2093
2094                 port = list_first_entry(&tcm->dp_resources,
2095                                         struct tb_port, list);
2096                 list_del_init(&port->list);
2097         }
2098 }
2099
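/*
 * tb_disconnect_pci() - tear down the PCIe tunnel going to the PCIe
 * upstream port of the given router and disconnect its xHCI
 */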
2100 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
2101 {
2102         struct tb_tunnel *tunnel;
2103         struct tb_port *up;
2104
2105         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
2106         if (WARN_ON(!up))
2107                 return -ENODEV;
2108
2109         tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
2110         if (WARN_ON(!tunnel))
2111                 return -ENODEV;
2112
2113         tb_switch_xhci_disconnect(sw);
2114
2115         tb_tunnel_deactivate(tunnel);
2116         list_del(&tunnel->list);
2117         tb_tunnel_free(tunnel);
2118         return 0;
2119 }
2120
2121 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
2122 {
2123         struct tb_port *up, *down, *port;
2124         struct tb_cm *tcm = tb_priv(tb);
2125         struct tb_tunnel *tunnel;
2126
2127         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
2128         if (!up)
2129                 return 0;
2130
2131         /*
2132          * Look up an available PCIe downstream port. Since we are
2133          * chaining, it should be found right above this switch.
2134          */
2135         port = tb_switch_downstream_port(sw);
2136         down = tb_find_pcie_down(tb_switch_parent(sw), port);
2137         if (!down)
2138                 return 0;
2139
2140         tunnel = tb_tunnel_alloc_pci(tb, up, down);
2141         if (!tunnel)
2142                 return -ENOMEM;
2143
2144         if (tb_tunnel_activate(tunnel)) {
2145                 tb_port_info(up,
2146                              "PCIe tunnel activation failed, aborting\n");
2147                 tb_tunnel_free(tunnel);
2148                 return -EIO;
2149         }
2150
2151         /*
2152          * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
2153          * here.
2154          */
2155         if (tb_switch_pcie_l1_enable(sw))
2156                 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
2157
2158         if (tb_switch_xhci_connect(sw))
2159                 tb_sw_warn(sw, "failed to connect xHCI\n");
2160
2161         list_add_tail(&tunnel->list, &tcm->tunnel_list);
2162         return 0;
2163 }
2164
2165 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2166                                     int transmit_path, int transmit_ring,
2167                                     int receive_path, int receive_ring)
2168 {
2169         struct tb_cm *tcm = tb_priv(tb);
2170         struct tb_port *nhi_port, *dst_port;
2171         struct tb_tunnel *tunnel;
2172         struct tb_switch *sw;
2173         int ret;
2174
2175         sw = tb_to_switch(xd->dev.parent);
2176         dst_port = tb_port_at(xd->route, sw);
2177         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2178
2179         mutex_lock(&tb->lock);
2180
2181         /*
2182          * When tunneling DMA paths the link should not enter CL states
2183          * so disable them now.
2184          */
2185         tb_disable_clx(sw);
2186
2187         tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
2188                                      transmit_ring, receive_path, receive_ring);
2189         if (!tunnel) {
2190                 ret = -ENOMEM;
2191                 goto err_clx;
2192         }
2193
2194         if (tb_tunnel_activate(tunnel)) {
2195                 tb_port_info(nhi_port,
2196                              "DMA tunnel activation failed, aborting\n");
2197                 ret = -EIO;
2198                 goto err_free;
2199         }
2200
2201         list_add_tail(&tunnel->list, &tcm->tunnel_list);
2202         mutex_unlock(&tb->lock);
2203         return 0;
2204
2205 err_free:
2206         tb_tunnel_free(tunnel);
2207 err_clx:
2208         tb_enable_clx(sw);
2209         mutex_unlock(&tb->lock);
2210
2211         return ret;
2212 }
2213
2214 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2215                                           int transmit_path, int transmit_ring,
2216                                           int receive_path, int receive_ring)
2217 {
2218         struct tb_cm *tcm = tb_priv(tb);
2219         struct tb_port *nhi_port, *dst_port;
2220         struct tb_tunnel *tunnel, *n;
2221         struct tb_switch *sw;
2222
2223         sw = tb_to_switch(xd->dev.parent);
2224         dst_port = tb_port_at(xd->route, sw);
2225         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2226
2227         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2228                 if (!tb_tunnel_is_dma(tunnel))
2229                         continue;
2230                 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
2231                         continue;
2232
2233                 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
2234                                         receive_path, receive_ring))
2235                         tb_deactivate_and_free_tunnel(tunnel);
2236         }
2237
2238         /*
2239          * Try to re-enable CL states now; it is OK if this fails
2240          * because we may still have another DMA tunnel active through
2241          * the same host router USB4 downstream port.
2242          */
2243         tb_enable_clx(sw);
2244 }
2245
2246 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2247                                        int transmit_path, int transmit_ring,
2248                                        int receive_path, int receive_ring)
2249 {
2250         if (!xd->is_unplugged) {
2251                 mutex_lock(&tb->lock);
2252                 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
2253                                               transmit_ring, receive_path,
2254                                               receive_ring);
2255                 mutex_unlock(&tb->lock);
2256         }
2257         return 0;
2258 }
2259
2260 /* hotplug handling */
2261
2262 /*
2263  * tb_handle_hotplug() - handle hotplug event
2264  *
2265  * Executes on tb->wq.
2266  */
2267 static void tb_handle_hotplug(struct work_struct *work)
2268 {
2269         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
2270         struct tb *tb = ev->tb;
2271         struct tb_cm *tcm = tb_priv(tb);
2272         struct tb_switch *sw;
2273         struct tb_port *port;
2274
2275         /* Bring the domain back from sleep if it was suspended */
2276         pm_runtime_get_sync(&tb->dev);
2277
2278         mutex_lock(&tb->lock);
2279         if (!tcm->hotplug_active)
2280                 goto out; /* during init, suspend or shutdown */
2281
2282         sw = tb_switch_find_by_route(tb, ev->route);
2283         if (!sw) {
2284                 tb_warn(tb,
2285                         "hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
2286                         ev->route, ev->port, ev->unplug);
2287                 goto out;
2288         }
2289         if (ev->port > sw->config.max_port_number) {
2290                 tb_warn(tb,
2291                         "hotplug event from non-existent port %llx:%x (unplug: %d)\n",
2292                         ev->route, ev->port, ev->unplug);
2293                 goto put_sw;
2294         }
2295         port = &sw->ports[ev->port];
2296         if (tb_is_upstream_port(port)) {
2297                 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
2298                        ev->route, ev->port, ev->unplug);
2299                 goto put_sw;
2300         }
2301
2302         pm_runtime_get_sync(&sw->dev);
2303
2304         if (ev->unplug) {
2305                 tb_retimer_remove_all(port);
2306
2307                 if (tb_port_has_remote(port)) {
2308                         tb_port_dbg(port, "switch unplugged\n");
2309                         tb_sw_set_unplugged(port->remote->sw);
2310                         tb_free_invalid_tunnels(tb);
2311                         tb_remove_dp_resources(port->remote->sw);
2312                         tb_switch_tmu_disable(port->remote->sw);
2313                         tb_switch_unconfigure_link(port->remote->sw);
2314                         tb_switch_set_link_width(port->remote->sw,
2315                                                  TB_LINK_WIDTH_SINGLE);
2316                         tb_switch_remove(port->remote->sw);
2317                         port->remote = NULL;
2318                         if (port->dual_link_port)
2319                                 port->dual_link_port->remote = NULL;
2320                         /* Maybe we can create another DP tunnel */
2321                         tb_recalc_estimated_bandwidth(tb);
2322                         tb_tunnel_dp(tb);
2323                 } else if (port->xdomain) {
2324                         struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
2325
2326                         tb_port_dbg(port, "xdomain unplugged\n");
2327                         /*
2328                          * Service drivers are unbound during
2329                          * tb_xdomain_remove() so setting XDomain as
2330                          * unplugged here prevents deadlock if they call
2331                          * tb_xdomain_disable_paths(). We will tear down
2332                          * all the tunnels below.
2333                          */
2334                         xd->is_unplugged = true;
2335                         tb_xdomain_remove(xd);
2336                         port->xdomain = NULL;
2337                         __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
2338                         tb_xdomain_put(xd);
2339                         tb_port_unconfigure_xdomain(port);
2340                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2341                         tb_dp_resource_unavailable(tb, port);
2342                 } else if (!port->port) {
2343                         tb_sw_dbg(sw, "xHCI disconnect request\n");
2344                         tb_switch_xhci_disconnect(sw);
2345                 } else {
2346                         tb_port_dbg(port,
2347                                    "got unplug event for disconnected port, ignoring\n");
2348                 }
2349         } else if (port->remote) {
2350                 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
2351         } else if (!port->port && sw->authorized) {
2352                 tb_sw_dbg(sw, "xHCI connect request\n");
2353                 tb_switch_xhci_connect(sw);
2354         } else {
2355                 if (tb_port_is_null(port)) {
2356                         tb_port_dbg(port, "hotplug: scanning\n");
2357                         tb_scan_port(port);
2358                         if (!port->remote)
2359                                 tb_port_dbg(port, "hotplug: no switch found\n");
2360                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2361                         tb_dp_resource_available(tb, port);
2362                 }
2363         }
2364
2365         pm_runtime_mark_last_busy(&sw->dev);
2366         pm_runtime_put_autosuspend(&sw->dev);
2367
2368 put_sw:
2369         tb_switch_put(sw);
2370 out:
2371         mutex_unlock(&tb->lock);
2372
2373         pm_runtime_mark_last_busy(&tb->dev);
2374         pm_runtime_put_autosuspend(&tb->dev);
2375
2376         kfree(ev);
2377 }
2378
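/*
 * tb_alloc_dp_bandwidth() - handle a DPTX bandwidth allocation request
 *
 * The requested bandwidths are in Mb/s and a negative value means there is
 * no request in that direction. Returns 0 on success and negative errno
 * otherwise (-ENOBUFS when there is not enough bandwidth available).
 */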
2379 static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
2380                                  int *requested_down)
2381 {
2382         int allocated_up, allocated_down, available_up, available_down, ret;
2383         int requested_up_corrected, requested_down_corrected, granularity;
2384         int max_up, max_down, max_up_rounded, max_down_rounded;
2385         struct tb_bandwidth_group *group;
2386         struct tb *tb = tunnel->tb;
2387         struct tb_port *in, *out;
2388         bool downstream;
2389
2390         ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
2391         if (ret)
2392                 return ret;
2393
2394         in = tunnel->src_port;
2395         out = tunnel->dst_port;
2396
2397         tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n",
2398                       allocated_up, allocated_down);
2399
2400         /*
2401          * If we get a rounded-up request from the graphics side, say HBR2 x 4
2402          * that is 17500 instead of 17280 (this is because of the
2403          * granularity), we allow it too. Here the graphics driver has already
2404          * negotiated with the DPRX the maximum possible rates (which is
2405          * 17280 in this case).
2406          *
2407          * Since the link cannot go higher than 17280 we use that in our
2408          * calculations but the DP IN adapter Allocated BW write must be
2409          * the same value (17500) otherwise the adapter will mark it as
2410          * failed for graphics.
2411          */
2412         ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
2413         if (ret)
2414                 goto fail;
2415
2416         ret = usb4_dp_port_granularity(in);
2417         if (ret < 0)
2418                 goto fail;
2419         granularity = ret;
2420
2421         max_up_rounded = roundup(max_up, granularity);
2422         max_down_rounded = roundup(max_down, granularity);
2423
2424         /*
2425          * This will "fix" the request down to the maximum supported
2426          * rate * lanes if it is at the maximum rounded up level.
2427          */
2428         requested_up_corrected = *requested_up;
2429         if (requested_up_corrected == max_up_rounded)
2430                 requested_up_corrected = max_up;
2431         else if (requested_up_corrected < 0)
2432                 requested_up_corrected = 0;
2433         requested_down_corrected = *requested_down;
2434         if (requested_down_corrected == max_down_rounded)
2435                 requested_down_corrected = max_down;
2436         else if (requested_down_corrected < 0)
2437                 requested_down_corrected = 0;
2438
2439         tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n",
2440                       requested_up_corrected, requested_down_corrected);
2441
2442         if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
2443             (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
2444                 tb_tunnel_dbg(tunnel,
2445                               "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
2446                               requested_up_corrected, requested_down_corrected,
2447                               max_up_rounded, max_down_rounded);
2448                 ret = -ENOBUFS;
2449                 goto fail;
2450         }
2451
2452         downstream = tb_tunnel_direction_downstream(tunnel);
2453         group = in->group;
2454
2455         if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
2456             (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
2457                 if (tunnel->bw_mode) {
2458                         int reserved;
2459                         /*
2460                          * If the requested bandwidth is less than or equal
2461                          * to what is currently allocated to that tunnel, we
2462                          * simply change the reservation of the tunnel and
2463                          * add the released bandwidth to the group reserve
2464                          * for the next 10s. Then we release it for
2465                          * others to use.
2466                          */
2467                         if (downstream)
2468                                 reserved = allocated_down - *requested_down;
2469                         else
2470                                 reserved = allocated_up - *requested_up;
2471
2472                         if (reserved > 0) {
2473                                 group->reserved += reserved;
2474                                 tb_dbg(tb, "group %d reserved %d total %d Mb/s\n",
2475                                        group->index, reserved, group->reserved);
2476
2477                                 /*
2478                                  * If it was not already pending,
2479                                  * schedule the release now. If it is, then
2480                                  * postpone it for another 10s (unless it
2481                                  * is already running, in which case the
2482                                  * 10s have already expired and we should
2483                                  * give the reserved bandwidth back to others).
2484                                  */
2485                                 mod_delayed_work(system_wq, &group->release_work,
2486                                         msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));
2487                         }
2488                 }
2489
2490                 return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2491                                                  requested_down);
2492         }
2493
2494         /*
2495          * More bandwidth is requested. Release all the potential
2496          * bandwidth from USB3 first.
2497          */
2498         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
2499         if (ret)
2500                 goto fail;
2501
2502         /*
2503          * Then go over all tunnels that cross the same USB4 ports (they
2504          * are also in the same group but we use the same function here
2505          * that we use with the normal bandwidth allocation).
2506          */
2507         ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
2508                                      true);
2509         if (ret)
2510                 goto reclaim;
2511
2512         tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d (+ %u reserved) Mb/s\n",
2513                       available_up, available_down, group->reserved);
2514
2515         if ((*requested_up >= 0 &&
2516                 available_up + group->reserved >= requested_up_corrected) ||
2517             (*requested_down >= 0 &&
2518                 available_down + group->reserved >= requested_down_corrected)) {
2519                 int released = 0;
2520
2521                 /*
2522                  * If the bandwidth on a link is >= asym_threshold,
2523                  * transition the link to asymmetric.
2524                  */
2525                 ret = tb_configure_asym(tb, in, out, *requested_up,
2526                                         *requested_down);
2527                 if (ret) {
2528                         tb_configure_sym(tb, in, out, true);
2529                         goto fail;
2530                 }
2531
2532                 ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2533                                                 requested_down);
2534                 if (ret) {
2535                         tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
2536                         tb_configure_sym(tb, in, out, true);
2537                 }
2538
2539                 if (downstream) {
2540                         if (*requested_down > available_down)
2541                                 released = *requested_down - available_down;
2542                 } else {
2543                         if (*requested_up > available_up)
2544                                 released = *requested_up - available_up;
2545                 }
2546                 if (released) {
2547                         group->reserved -= released;
2548                         tb_dbg(tb, "group %d released %d total %d Mb/s\n",
2549                                group->index, released, group->reserved);
2550                 }
2551         } else {
2552                 ret = -ENOBUFS;
2553         }
2554
2555 reclaim:
2556         tb_reclaim_usb3_bandwidth(tb, in, out);
2557 fail:
2558         if (ret && ret != -ENODEV) {
2559                 /*
2560                  * Write back the same allocated bandwidth (so no change);
2561                  * this makes the DPTX request fail on the graphics side.
2562                  */
2563                 tb_tunnel_dbg(tunnel,
2564                               "failing the request by rewriting allocated %d/%d Mb/s\n",
2565                               allocated_up, allocated_down);
2566                 tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
2567         }
2568
2569         return ret;
2570 }
2571
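/*
 * tb_handle_dp_bandwidth_request() - handle bandwidth allocation requests
 * from DP IN adapters
 *
 * Executes on tb->wq.
 */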
2572 static void tb_handle_dp_bandwidth_request(struct work_struct *work)
2573 {
2574         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
2575         int requested_bw, requested_up, requested_down, ret;
2576         struct tb_tunnel *tunnel;
2577         struct tb *tb = ev->tb;
2578         struct tb_cm *tcm = tb_priv(tb);
2579         struct tb_switch *sw;
2580         struct tb_port *in;
2581
2582         pm_runtime_get_sync(&tb->dev);
2583
2584         mutex_lock(&tb->lock);
2585         if (!tcm->hotplug_active)
2586                 goto unlock;
2587
2588         sw = tb_switch_find_by_route(tb, ev->route);
2589         if (!sw) {
2590                 tb_warn(tb, "bandwidth request from non-existent router %llx\n",
2591                         ev->route);
2592                 goto unlock;
2593         }
2594
2595         in = &sw->ports[ev->port];
2596         if (!tb_port_is_dpin(in)) {
2597                 tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
2598                 goto put_sw;
2599         }
2600
2601         tb_port_dbg(in, "handling bandwidth allocation request\n");
2602
2603         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
2604         if (!tunnel) {
2605                 tb_port_warn(in, "failed to find tunnel\n");
2606                 goto put_sw;
2607         }
2608
2609         if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
2610                 if (tunnel->bw_mode) {
2611                         /*
2612                          * Reset the tunnel back to use the legacy
2613                          * allocation.
2614                          */
2615                         tunnel->bw_mode = false;
2616                         tb_port_dbg(in, "DPTX disabled bandwidth allocation mode\n");
2617                 } else {
2618                         tb_port_warn(in, "bandwidth allocation mode not enabled\n");
2619                 }
2620                 goto put_sw;
2621         }
2622
2623         ret = usb4_dp_port_requested_bandwidth(in);
2624         if (ret < 0) {
2625                 if (ret == -ENODATA) {
2626                         /*
2627                          * There is no request active, so this means the
2628                          * BW allocation mode was enabled from the graphics
2629                          * side. At this point we know that the graphics
2630                          * driver has read the DPRX capabilities so we
2631                          * can offer a better bandwidth estimation.
2632                          */
2633                         tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n");
2634                         tb_recalc_estimated_bandwidth(tb);
2635                 } else {
2636                         tb_port_warn(in, "failed to read requested bandwidth\n");
2637                 }
2638                 goto put_sw;
2639         }
2640         requested_bw = ret;
2641
2642         tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
2643
2644         if (tb_tunnel_direction_downstream(tunnel)) {
2645                 requested_up = -1;
2646                 requested_down = requested_bw;
2647         } else {
2648                 requested_up = requested_bw;
2649                 requested_down = -1;
2650         }
2651
2652         ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
2653         if (ret) {
2654                 if (ret == -ENOBUFS)
2655                         tb_tunnel_warn(tunnel,
2656                                        "not enough bandwidth available\n");
2657                 else
2658                         tb_tunnel_warn(tunnel,
2659                                        "failed to change bandwidth allocation\n");
2660         } else {
2661                 tb_tunnel_dbg(tunnel,
2662                               "bandwidth allocation changed to %d/%d Mb/s\n",
2663                               requested_up, requested_down);
2664
2665                 /* Update other clients about the allocation change */
2666                 tb_recalc_estimated_bandwidth(tb);
2667         }
2668
2669 put_sw:
2670         tb_switch_put(sw);
2671 unlock:
2672         mutex_unlock(&tb->lock);
2673
2674         pm_runtime_mark_last_busy(&tb->dev);
2675         pm_runtime_put_autosuspend(&tb->dev);
2676
2677         kfree(ev);
2678 }
2679
2680 static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
2681 {
2682         struct tb_hotplug_event *ev;
2683
2684         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
2685         if (!ev)
2686                 return;
2687
2688         ev->tb = tb;
2689         ev->route = route;
2690         ev->port = port;
2691         INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
2692         queue_work(tb->wq, &ev->work);
2693 }
2694
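/*
 * tb_handle_notification() - acknowledge router notifications
 *
 * DP bandwidth requests are additionally handed over to a work item;
 * unknown notifications are ignored for now.
 */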
2695 static void tb_handle_notification(struct tb *tb, u64 route,
2696                                    const struct cfg_error_pkg *error)
2697 {
2698
2699         switch (error->error) {
2700         case TB_CFG_ERROR_PCIE_WAKE:
2701         case TB_CFG_ERROR_DP_CON_CHANGE:
2702         case TB_CFG_ERROR_DPTX_DISCOVERY:
2703                 if (tb_cfg_ack_notification(tb->ctl, route, error))
2704                         tb_warn(tb, "could not ack notification on %llx\n",
2705                                 route);
2706                 break;
2707
2708         case TB_CFG_ERROR_DP_BW:
2709                 if (tb_cfg_ack_notification(tb->ctl, route, error))
2710                         tb_warn(tb, "could not ack notification on %llx\n",
2711                                 route);
2712                 tb_queue_dp_bandwidth_request(tb, route, error->port);
2713                 break;
2714
2715         default:
2716                 /* Ignore for now */
2717                 break;
2718         }
2719 }
2720
2721 /*
2722  * tb_handle_event() - callback function for the control channel
2723  *
2724  * Delegates hotplug events to tb_handle_hotplug.
2725  */
2726 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
2727                             const void *buf, size_t size)
2728 {
2729         const struct cfg_event_pkg *pkg = buf;
2730         u64 route = tb_cfg_get_route(&pkg->header);
2731
2732         switch (type) {
2733         case TB_CFG_PKG_ERROR:
2734                 tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
2735                 return;
2736         case TB_CFG_PKG_EVENT:
2737                 break;
2738         default:
2739                 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
2740                 return;
2741         }
2742
2743         if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
2744                 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
2745                         pkg->port);
2746         }
2747
2748         tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
2749 }
2750
2751 static void tb_stop(struct tb *tb)
2752 {
2753         struct tb_cm *tcm = tb_priv(tb);
2754         struct tb_tunnel *tunnel;
2755         struct tb_tunnel *n;
2756
2757         cancel_delayed_work(&tcm->remove_work);
2758         /* tunnels are only present after everything has been initialized */
2759         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2760                 /*
2761                  * DMA tunnels require the driver to be functional so we
2762                  * tear them down. Other protocol tunnels can be left
2763                  * intact.
2764                  */
2765                 if (tb_tunnel_is_dma(tunnel))
2766                         tb_tunnel_deactivate(tunnel);
2767                 tb_tunnel_free(tunnel);
2768         }
2769         tb_switch_remove(tb->root_switch);
2770         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2771 }
2772
2773 static void tb_deinit(struct tb *tb)
2774 {
2775         struct tb_cm *tcm = tb_priv(tb);
2776         int i;
2777
2778         /* Cancel all the release bandwidth workers */
2779         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++)
2780                 cancel_delayed_work_sync(&tcm->groups[i].release_work);
2781 }
2782
2783 static int tb_scan_finalize_switch(struct device *dev, void *data)
2784 {
2785         if (tb_is_switch(dev)) {
2786                 struct tb_switch *sw = tb_to_switch(dev);
2787
2788                 /*
2789                  * If we found that the switch was already setup by the
2790                  * boot firmware, mark it as authorized now before we
2791                  * send uevent to userspace.
2792                  */
2793                 if (sw->boot)
2794                         sw->authorized = 1;
2795
2796                 dev_set_uevent_suppress(dev, false);
2797                 kobject_uevent(&dev->kobj, KOBJ_ADD);
2798                 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
2799         }
2800
2801         return 0;
2802 }
2803
2804 static int tb_start(struct tb *tb, bool reset)
2805 {
2806         struct tb_cm *tcm = tb_priv(tb);
2807         bool discover = true;
2808         int ret;
2809
2810         tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
2811         if (IS_ERR(tb->root_switch))
2812                 return PTR_ERR(tb->root_switch);
2813
2814         /*
2815          * ICM firmware upgrade needs running firmware, and in native
2816          * mode that is not available, so disable firmware upgrade of the
2817          * root switch.
2818          *
2819          * However, USB4 routers support NVM firmware upgrade if they
2820          * implement the necessary router operations.
2821          */
2822         tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
2823         /* All USB4 routers support runtime PM */
2824         tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
2825
2826         ret = tb_switch_configure(tb->root_switch);
2827         if (ret) {
2828                 tb_switch_put(tb->root_switch);
2829                 return ret;
2830         }
2831
2832         /* Announce the switch to the world */
2833         ret = tb_switch_add(tb->root_switch);
2834         if (ret) {
2835                 tb_switch_put(tb->root_switch);
2836                 return ret;
2837         }
2838
2839         /*
2840          * To support the highest CLx state, we set the host router's
2841          * TMU to Normal mode.
2842          */
2843         tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
2844         /* Enable TMU if it is off */
2845         tb_switch_tmu_enable(tb->root_switch);
2846
2847         /*
2848          * Boot firmware might have created tunnels of its own. Since we
2849          * cannot be sure they are usable for us, tear them down and
2850          * reset the ports to handle it as new hotplug for USB4 v1
2851          * routers (for USB4 v2 and beyond we already do host reset).
2852          */
2853         if (reset && tb_switch_is_usb4(tb->root_switch)) {
2854                 discover = false;
2855                 if (usb4_switch_version(tb->root_switch) == 1)
2856                         tb_switch_reset(tb->root_switch);
2857         }
2858
2859         if (discover) {
2860                 /* Full scan to discover devices added before the driver was loaded. */
2861                 tb_scan_switch(tb->root_switch);
2862                 /* Find out tunnels created by the boot firmware */
2863                 tb_discover_tunnels(tb);
2864                 /* Add DP resources from the DP tunnels created by the boot firmware */
2865                 tb_discover_dp_resources(tb);
2866         }
2867
2868         /*
2869          * If the boot firmware did not create USB 3.x tunnels, create
2870          * them now for the whole topology.
2871          */
2872         tb_create_usb3_tunnels(tb->root_switch);
2873         /* Add DP IN resources for the root switch */
2874         tb_add_dp_resources(tb->root_switch);
2875         /* Make the discovered switches available to the userspace */
2876         device_for_each_child(&tb->root_switch->dev, NULL,
2877                               tb_scan_finalize_switch);
2878
2879         /* Allow tb_handle_hotplug to progress events */
2880         tcm->hotplug_active = true;
2881         return 0;
2882 }
2883
2884 static int tb_suspend_noirq(struct tb *tb)
2885 {
2886         struct tb_cm *tcm = tb_priv(tb);
2887
2888         tb_dbg(tb, "suspending...\n");
2889         tb_disconnect_and_release_dp(tb);
2890         tb_switch_suspend(tb->root_switch, false);
2891         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2892         tb_dbg(tb, "suspend finished\n");
2893
2894         return 0;
2895 }
2896
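/*
 * Walk the topology and re-enable CL states and TMU, restore link
 * width and link configuration for downstream routers, and reconfigure
 * any XDomain connections. Used on both system and runtime resume.
 */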
2897 static void tb_restore_children(struct tb_switch *sw)
2898 {
2899         struct tb_port *port;
2900
2901         /* No need to restore if the router is already unplugged */
2902         if (sw->is_unplugged)
2903                 return;
2904
2905         if (tb_enable_clx(sw))
2906                 tb_sw_warn(sw, "failed to re-enable CL states\n");
2907
2908         if (tb_enable_tmu(sw))
2909                 tb_sw_warn(sw, "failed to restore TMU configuration\n");
2910
2911         tb_switch_configuration_valid(sw);
2912
2913         tb_switch_for_each_port(sw, port) {
2914                 if (!tb_port_has_remote(port) && !port->xdomain)
2915                         continue;
2916
2917                 if (port->remote) {
2918                         tb_switch_set_link_width(port->remote->sw,
2919                                                  port->remote->sw->link_width);
2920                         tb_switch_configure_link(port->remote->sw);
2921
2922                         tb_restore_children(port->remote->sw);
2923                 } else if (port->xdomain) {
2924                         tb_port_configure_xdomain(port, port->xdomain);
2925                 }
2926         }
2927 }
2928
2929 static int tb_resume_noirq(struct tb *tb)
2930 {
2931         struct tb_cm *tcm = tb_priv(tb);
2932         struct tb_tunnel *tunnel, *n;
2933         unsigned int usb3_delay = 0;
2934         LIST_HEAD(tunnels);
2935
2936         tb_dbg(tb, "resuming...\n");
2937
2938         /*
2939          * For non-USB4 hosts (Apple systems) remove any PCIe devices
2940          * the firmware might have setup.
2941          * the firmware might have set up.
2942         if (!tb_switch_is_usb4(tb->root_switch))
2943                 tb_switch_reset(tb->root_switch);
2944
2945         tb_switch_resume(tb->root_switch, false);
2946         tb_free_invalid_tunnels(tb);
2947         tb_free_unplugged_children(tb->root_switch);
2948         tb_restore_children(tb->root_switch);
2949
2950         /*
2951          * If we get here from suspend to disk, the boot firmware or the
2952          * restore kernel might have created tunnels of their own. Since
2953          * we cannot be sure they are usable for us, find and tear them
2954          * down.
2955          */
2956         tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
2957         list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
2958                 if (tb_tunnel_is_usb3(tunnel))
2959                         usb3_delay = 500;
2960                 tb_tunnel_deactivate(tunnel);
2961                 tb_tunnel_free(tunnel);
2962         }
2963
2964         /* Re-create our tunnels now */
2965         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2966                 /* USB3 requires a delay before it can be re-activated */
2967                 if (tb_tunnel_is_usb3(tunnel)) {
2968                         msleep(usb3_delay);
2969                         /* Only need to do it once */
2970                         usb3_delay = 0;
2971                 }
2972                 tb_tunnel_restart(tunnel);
2973         }
2974         if (!list_empty(&tcm->tunnel_list)) {
2975                 /*
2976                  * The PCIe links need some time to come back up;
2977                  * 100 ms has been enough in practice.
2978                  */
2979                 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
2980                 msleep(100);
2981         }
2982         /* Allow tb_handle_hotplug to progress events */
2983         tcm->hotplug_active = true;
2984         tb_dbg(tb, "resume finished\n");
2985
2986         return 0;
2987 }
2988
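/*
 * Remove XDomain connections that went away while we were sleeping.
 * Returns the number of removed XDomains so the caller can decide
 * whether a rescan of the topology is needed.
 */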
2989 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
2990 {
2991         struct tb_port *port;
2992         int ret = 0;
2993
2994         tb_switch_for_each_port(sw, port) {
2995                 if (tb_is_upstream_port(port))
2996                         continue;
2997                 if (port->xdomain && port->xdomain->is_unplugged) {
2998                         tb_retimer_remove_all(port);
2999                         tb_xdomain_remove(port->xdomain);
3000                         tb_port_unconfigure_xdomain(port);
3001                         port->xdomain = NULL;
3002                         ret++;
3003                 } else if (port->remote) {
3004                         ret += tb_free_unplugged_xdomains(port->remote->sw);
3005                 }
3006         }
3007
3008         return ret;
3009 }
3010
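/*
 * Freeze/thaw around hibernation image creation do not touch the
 * hardware; they only pause and resume processing of hotplug events.
 */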
3011 static int tb_freeze_noirq(struct tb *tb)
3012 {
3013         struct tb_cm *tcm = tb_priv(tb);
3014
3015         tcm->hotplug_active = false;
3016         return 0;
3017 }
3018
3019 static int tb_thaw_noirq(struct tb *tb)
3020 {
3021         struct tb_cm *tcm = tb_priv(tb);
3022
3023         tcm->hotplug_active = true;
3024         return 0;
3025 }
3026
3027 static void tb_complete(struct tb *tb)
3028 {
3029         /*
3030          * Release any unplugged XDomains and, in case another domain
3031          * has been swapped in place of an unplugged XDomain, run
3032          * another rescan.
3033          */
3034         mutex_lock(&tb->lock);
3035         if (tb_free_unplugged_xdomains(tb->root_switch))
3036                 tb_scan_switch(tb->root_switch);
3037         mutex_unlock(&tb->lock);
3038 }
3039
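/*
 * Runtime PM: put the whole topology to sleep and stop processing
 * hotplug events until tb_runtime_resume() wakes it back up.
 */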
3040 static int tb_runtime_suspend(struct tb *tb)
3041 {
3042         struct tb_cm *tcm = tb_priv(tb);
3043
3044         mutex_lock(&tb->lock);
3045         tb_switch_suspend(tb->root_switch, true);
3046         tcm->hotplug_active = false;
3047         mutex_unlock(&tb->lock);
3048
3049         return 0;
3050 }
3051
3052 static void tb_remove_work(struct work_struct *work)
3053 {
3054         struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
3055         struct tb *tb = tcm_to_tb(tcm);
3056
3057         mutex_lock(&tb->lock);
3058         if (tb->root_switch) {
3059                 tb_free_unplugged_children(tb->root_switch);
3060                 tb_free_unplugged_xdomains(tb->root_switch);
3061         }
3062         mutex_unlock(&tb->lock);
3063 }
3064
3065 static int tb_runtime_resume(struct tb *tb)
3066 {
3067         struct tb_cm *tcm = tb_priv(tb);
3068         struct tb_tunnel *tunnel, *n;
3069
3070         mutex_lock(&tb->lock);
3071         tb_switch_resume(tb->root_switch, true);
3072         tb_free_invalid_tunnels(tb);
3073         tb_restore_children(tb->root_switch);
3074         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
3075                 tb_tunnel_restart(tunnel);
3076         tcm->hotplug_active = true;
3077         mutex_unlock(&tb->lock);
3078
3079         /*
3080          * Schedule cleanup of any unplugged devices. Run this in a
3081          * separate thread to avoid a possible deadlock if the device
3082          * removal runtime resumes the unplugged device.
3083          */
3084         queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
3085         return 0;
3086 }
3087
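/*
 * Callbacks the Thunderbolt domain core invokes for the software
 * connection manager.
 */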
3088 static const struct tb_cm_ops tb_cm_ops = {
3089         .start = tb_start,
3090         .stop = tb_stop,
3091         .deinit = tb_deinit,
3092         .suspend_noirq = tb_suspend_noirq,
3093         .resume_noirq = tb_resume_noirq,
3094         .freeze_noirq = tb_freeze_noirq,
3095         .thaw_noirq = tb_thaw_noirq,
3096         .complete = tb_complete,
3097         .runtime_suspend = tb_runtime_suspend,
3098         .runtime_resume = tb_runtime_resume,
3099         .handle_event = tb_handle_event,
3100         .disapprove_switch = tb_disconnect_pci,
3101         .approve_switch = tb_tunnel_pci,
3102         .approve_xdomain_paths = tb_approve_xdomain_paths,
3103         .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
3104 };
3105
3106 /*
3107  * During suspend the Thunderbolt controller is reset and all PCIe
3108  * tunnels are lost. The NHI driver will try to reestablish all tunnels
3109  * during resume. This adds device links between the tunneled PCIe
3110  * downstream ports and the NHI so that the device core makes sure the
3111  * NHI is resumed before the rest.
3112  */
3113 static bool tb_apple_add_links(struct tb_nhi *nhi)
3114 {
3115         struct pci_dev *upstream, *pdev;
3116         bool ret;
3117
3118         if (!x86_apple_machine)
3119                 return false;
3120
3121         switch (nhi->pdev->device) {
3122         case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
3123         case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
3124         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
3125         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
3126                 break;
3127         default:
3128                 return false;
3129         }
3130
3131         upstream = pci_upstream_bridge(nhi->pdev);
3132         while (upstream) {
3133                 if (!pci_is_pcie(upstream))
3134                         return false;
3135                 if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
3136                         break;
3137                 upstream = pci_upstream_bridge(upstream);
3138         }
3139
3140         if (!upstream)
3141                 return false;
3142
3143         /*
3144          * For each hotplug downstream port, add a device link back
3145          * to the NHI so that PCIe tunnels can be re-established after
3146          * sleep.
3147          */
3148         ret = false;
3149         for_each_pci_bridge(pdev, upstream->subordinate) {
3150                 const struct device_link *link;
3151
3152                 if (!pci_is_pcie(pdev))
3153                         continue;
3154                 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
3155                     !pdev->is_hotplug_bridge)
3156                         continue;
3157
3158                 link = device_link_add(&pdev->dev, &nhi->pdev->dev,
3159                                        DL_FLAG_AUTOREMOVE_SUPPLIER |
3160                                        DL_FLAG_PM_RUNTIME);
3161                 if (link) {
3162                         dev_dbg(&nhi->pdev->dev, "created link from %s\n",
3163                                 dev_name(&pdev->dev));
3164                         ret = true;
3165                 } else {
3166                         dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
3167                                  dev_name(&pdev->dev));
3168                 }
3169         }
3170
3171         return ret;
3172 }
3173
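/*
 * tb_probe() is the entry point through which the NHI driver attaches
 * this software connection manager to a newly found NHI. A rough sketch
 * of the expected call sequence (assumption: based on the in-tree NHI
 * driver; icm_probe() and the exact tb_domain_add() signature may
 * differ between kernel versions):
 *
 *	tb = icm_probe(nhi);            // prefer the firmware CM
 *	if (!tb)
 *		tb = tb_probe(nhi);     // fall back to this software CM
 *	if (!tb)
 *		return -ENODEV;
 *	ret = tb_domain_add(tb, reset); // eventually calls tb_cm_ops.start()
 */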
3174 struct tb *tb_probe(struct tb_nhi *nhi)
3175 {
3176         struct tb_cm *tcm;
3177         struct tb *tb;
3178
3179         tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
3180         if (!tb)
3181                 return NULL;
3182
3183         if (tb_acpi_may_tunnel_pcie())
3184                 tb->security_level = TB_SECURITY_USER;
3185         else
3186                 tb->security_level = TB_SECURITY_NOPCIE;
3187
3188         tb->cm_ops = &tb_cm_ops;
3189
3190         tcm = tb_priv(tb);
3191         INIT_LIST_HEAD(&tcm->tunnel_list);
3192         INIT_LIST_HEAD(&tcm->dp_resources);
3193         INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
3194         tb_init_bandwidth_groups(tcm);
3195
3196         tb_dbg(tb, "using software connection manager\n");
3197
3198         /*
3199          * Device links are needed to make sure we establish tunnels
3200          * before the PCIe/USB stack is resumed, so complain here if
3201          * we find them missing.
3202          */
3203         if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
3204                 tb_warn(tb, "device links to tunneled native ports are missing!\n");
3205
3206         return tb;
3207 }