1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - bus logic (NHI independent)
4  *
5  * Copyright (c) 2014 Andreas Noever <[email protected]>
6  * Copyright (C) 2019, Intel Corporation
7  */
8
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/platform_data/x86/apple.h>
14
15 #include "tb.h"
16 #include "tb_regs.h"
17 #include "tunnel.h"
18
19 #define TB_TIMEOUT              100     /* ms */
20 #define TB_RELEASE_BW_TIMEOUT   10000   /* ms */
21
22 /*
23  * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
24  * direction. This is 40G - 10% guard band bandwidth.
25  */
26 #define TB_ASYM_MIN             (40000 * 90 / 100)
27
28 /*
29  * Threshold bandwidth (in Mb/s) that is used to switch the links to
30  * asymmetric and back. This is selected as 45G which means when the
31  * request is higher than this, we switch the link to asymmetric, and
32  * when it is less than this we switch it back. The 45G is selected so
33  * that we still have 27G (of the total 72G) for bulk PCIe traffic when
34  * switching back to symmetric.
35  */
36 #define TB_ASYM_THRESHOLD       45000
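/*
 * Worked example with the values above: a symmetric Gen 4 link carries
 * 2 x 40G = 80G per direction, or 72G once the 10% guard band is taken
 * off. Crossing the 45G threshold and going asymmetric therefore still
 * leaves 72G - 45G = 27G for bulk PCIe traffic when the link is later
 * switched back to symmetric, and TB_ASYM_MIN works out to
 * 40000 * 90 / 100 = 36000 Mb/s.
 */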
37
38 #define MAX_GROUPS              7       /* max Group_ID is 7 */
39
40 static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
41 module_param_named(asym_threshold, asym_threshold, uint, 0444);
42 MODULE_PARM_DESC(asym_threshold,
43                 "threshold (Mb/s) when to switch Gen 4 link symmetry. 0 disables. (default: "
44                 __MODULE_STRING(TB_ASYM_THRESHOLD) ")");
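/*
 * The parameter is read-only at runtime (0444), so it is meant to be set
 * at module load time. Assuming the driver is built as the thunderbolt
 * module, asymmetric transitions could for example be disabled with
 * "thunderbolt.asym_threshold=0" on the kernel command line.
 */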
45
46 /**
47  * struct tb_cm - Simple Thunderbolt connection manager
48  * @tunnel_list: List of active tunnels
49  * @dp_resources: List of available DP resources for DP tunneling
50  * @hotplug_active: tb_handle_hotplug will stop processing hotplug
51  *                  events and exit if this is not set (it needs to
52  *                  acquire the lock one more time). Used to drain wq
53  *                  after cfg has been paused.
54  * @remove_work: Work used to remove any unplugged routers after
55  *               runtime resume
56  * @groups: Bandwidth groups used in this domain.
57  */
58 struct tb_cm {
59         struct list_head tunnel_list;
60         struct list_head dp_resources;
61         bool hotplug_active;
62         struct delayed_work remove_work;
63         struct tb_bandwidth_group groups[MAX_GROUPS];
64 };
65
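/*
 * This relies on tb_priv() returning connection manager private data
 * that is allocated immediately after struct tb, so stepping back
 * sizeof(struct tb) bytes recovers the owning domain structure.
 */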
66 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
67 {
68         return ((void *)tcm - sizeof(struct tb));
69 }
70
71 struct tb_hotplug_event {
72         struct work_struct work;
73         struct tb *tb;
74         u64 route;
75         u8 port;
76         bool unplug;
77 };
78
79 static void tb_handle_hotplug(struct work_struct *work);
80
81 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
82 {
83         struct tb_hotplug_event *ev;
84
85         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
86         if (!ev)
87                 return;
88
89         ev->tb = tb;
90         ev->route = route;
91         ev->port = port;
92         ev->unplug = unplug;
93         INIT_WORK(&ev->work, tb_handle_hotplug);
94         queue_work(tb->wq, &ev->work);
95 }
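/*
 * Ownership of the event passes to the work item here: tb_handle_hotplug()
 * is expected to free it once the event has been processed. If the
 * allocation fails the event is simply dropped.
 */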
96
97 /* enumeration & hot plug handling */
98
99 static void tb_add_dp_resources(struct tb_switch *sw)
100 {
101         struct tb_cm *tcm = tb_priv(sw->tb);
102         struct tb_port *port;
103
104         tb_switch_for_each_port(sw, port) {
105                 if (!tb_port_is_dpin(port))
106                         continue;
107
108                 if (!tb_switch_query_dp_resource(sw, port))
109                         continue;
110
111                 /*
112                  * If a DP IN on a device router exists, position it at the
113                  * beginning of the DP resources list, so that it is used
114                  * before DP IN of the host router. This way external GPU(s)
115                  * will be prioritized when pairing DP IN to a DP OUT.
116                  */
117                 if (tb_route(sw))
118                         list_add(&port->list, &tcm->dp_resources);
119                 else
120                         list_add_tail(&port->list, &tcm->dp_resources);
121
122                 tb_port_dbg(port, "DP IN resource available\n");
123         }
124 }
125
126 static void tb_remove_dp_resources(struct tb_switch *sw)
127 {
128         struct tb_cm *tcm = tb_priv(sw->tb);
129         struct tb_port *port, *tmp;
130
131         /* Clear children resources first */
132         tb_switch_for_each_port(sw, port) {
133                 if (tb_port_has_remote(port))
134                         tb_remove_dp_resources(port->remote->sw);
135         }
136
137         list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
138                 if (port->sw == sw) {
139                         tb_port_dbg(port, "DP OUT resource unavailable\n");
140                         list_del_init(&port->list);
141                 }
142         }
143 }
144
145 static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
146 {
147         struct tb_cm *tcm = tb_priv(tb);
148         struct tb_port *p;
149
150         list_for_each_entry(p, &tcm->dp_resources, list) {
151                 if (p == port)
152                         return;
153         }
154
155         tb_port_dbg(port, "DP %s resource available discovered\n",
156                     tb_port_is_dpin(port) ? "IN" : "OUT");
157         list_add_tail(&port->list, &tcm->dp_resources);
158 }
159
160 static void tb_discover_dp_resources(struct tb *tb)
161 {
162         struct tb_cm *tcm = tb_priv(tb);
163         struct tb_tunnel *tunnel;
164
165         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
166                 if (tb_tunnel_is_dp(tunnel))
167                         tb_discover_dp_resource(tb, tunnel->dst_port);
168         }
169 }
170
171 /* Enables CL states up to host router */
172 static int tb_enable_clx(struct tb_switch *sw)
173 {
174         struct tb_cm *tcm = tb_priv(sw->tb);
175         unsigned int clx = TB_CL0S | TB_CL1;
176         const struct tb_tunnel *tunnel;
177         int ret;
178
179         /*
180          * Currently only enable CLx for the first link. This is enough
181          * to allow the CPU to save energy at least on Intel hardware
182          * and makes it slightly simpler to implement. We may change
183          * this in the future to cover the whole topology if it turns
184          * out to be beneficial.
185          */
186         while (sw && tb_switch_depth(sw) > 1)
187                 sw = tb_switch_parent(sw);
188
189         if (!sw)
190                 return 0;
191
192         if (tb_switch_depth(sw) != 1)
193                 return 0;
194
195         /*
196          * If we are re-enabling then check if there is an active DMA
197          * tunnel and in that case bail out.
198          */
199         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
200                 if (tb_tunnel_is_dma(tunnel)) {
201                         if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
202                                 return 0;
203                 }
204         }
205
206         /*
207          * Initially try with CL2. If that's not supported by the
208          * topology try with CL0s and CL1 and then give up.
209          */
210         ret = tb_switch_clx_enable(sw, clx | TB_CL2);
211         if (ret == -EOPNOTSUPP)
212                 ret = tb_switch_clx_enable(sw, clx);
213         return ret == -EOPNOTSUPP ? 0 : ret;
214 }
215
216 /**
217  * tb_disable_clx() - Disable CL states up to host router
218  * @sw: Router to start
219  *
220  * Disables CL states from @sw up to the host router. Returns true if
221  * any CL states were disabled. This can be used to figure out whether
222  * the link was set up by us or the boot firmware so we don't
223  * accidentally enable them if they were not enabled during discovery.
224  */
225 static bool tb_disable_clx(struct tb_switch *sw)
226 {
227         bool disabled = false;
228
229         do {
230                 int ret;
231
232                 ret = tb_switch_clx_disable(sw);
233                 if (ret > 0)
234                         disabled = true;
235                 else if (ret < 0)
236                         tb_sw_warn(sw, "failed to disable CL states\n");
237
238                 sw = tb_switch_parent(sw);
239         } while (sw);
240
241         return disabled;
242 }
243
244 static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
245 {
246         struct tb_switch *sw;
247
248         sw = tb_to_switch(dev);
249         if (!sw)
250                 return 0;
251
252         if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
253                 enum tb_switch_tmu_mode mode;
254                 int ret;
255
256                 if (tb_switch_clx_is_enabled(sw, TB_CL1))
257                         mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
258                 else
259                         mode = TB_SWITCH_TMU_MODE_HIFI_BI;
260
261                 ret = tb_switch_tmu_configure(sw, mode);
262                 if (ret)
263                         return ret;
264
265                 return tb_switch_tmu_enable(sw);
266         }
267
268         return 0;
269 }
270
271 static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
272 {
273         struct tb_switch *sw;
274
275         if (!tunnel)
276                 return;
277
278         /*
279          * Once first DP tunnel is established we change the TMU
280          * accuracy of first depth child routers (and the host router)
281          * to the highest. This is needed for the DP tunneling to work
282          * but also allows CL0s.
283          *
284          * If both routers are v2 then we don't need to do anything as
285          * they are using enhanced TMU mode that allows all CLx.
286          */
287         sw = tunnel->tb->root_switch;
288         device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
289 }
290
291 static int tb_switch_tmu_hifi_uni_required(struct device *dev, void *not_used)
292 {
293         struct tb_switch *sw = tb_to_switch(dev);
294
295         if (sw && tb_switch_tmu_is_enabled(sw) &&
296             tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_HIFI_UNI))
297                 return 1;
298
299         return device_for_each_child(dev, NULL,
300                                      tb_switch_tmu_hifi_uni_required);
301 }
302
303 static bool tb_tmu_hifi_uni_required(struct tb *tb)
304 {
305         return device_for_each_child(&tb->dev, NULL,
306                                      tb_switch_tmu_hifi_uni_required) == 1;
307 }
308
309 static int tb_enable_tmu(struct tb_switch *sw)
310 {
311         int ret;
312
313         /*
314          * If both routers at the end of the link are v2 we simply
315          * enable the enhanced uni-directional mode. That covers all
316          * the CL states. For v1 and before we need to use the normal
317          * rate to allow CL1 (when supported). Otherwise we keep the TMU
318          * running at the highest accuracy.
319          */
320         ret = tb_switch_tmu_configure(sw,
321                         TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
322         if (ret == -EOPNOTSUPP) {
323                 if (tb_switch_clx_is_enabled(sw, TB_CL1)) {
324                         /*
325                          * Figure out uni-directional HiFi TMU requirements
326                          * currently in the domain. If there are no
327                          * uni-directional HiFi requirements we can put the TMU
328                          * into LowRes mode.
329                          *
330                          * Deliberately skip bi-directional HiFi links
331                          * as these work independently of other links
332                          * (and they do not allow any CL states anyway).
333                          */
334                         if (tb_tmu_hifi_uni_required(sw->tb))
335                                 ret = tb_switch_tmu_configure(sw,
336                                                 TB_SWITCH_TMU_MODE_HIFI_UNI);
337                         else
338                                 ret = tb_switch_tmu_configure(sw,
339                                                 TB_SWITCH_TMU_MODE_LOWRES);
340                 } else {
341                         ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
342                 }
343
344                 /* If not supported, fall back to bi-directional HiFi */
345                 if (ret == -EOPNOTSUPP)
346                         ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
347         }
348         if (ret)
349                 return ret;
350
351         /* If it is already enabled in correct mode, don't touch it */
352         if (tb_switch_tmu_is_enabled(sw))
353                 return 0;
354
355         ret = tb_switch_tmu_disable(sw);
356         if (ret)
357                 return ret;
358
359         ret = tb_switch_tmu_post_time(sw);
360         if (ret)
361                 return ret;
362
363         return tb_switch_tmu_enable(sw);
364 }
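/*
 * To summarize the mode selection above: enhanced uni-directional MedRes
 * is tried first (v2 routers), then with CL1 enabled either uni-directional
 * HiFi or LowRes depending on what the rest of the domain requires, and
 * bi-directional HiFi serves as the final fallback.
 */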
365
366 static void tb_switch_discover_tunnels(struct tb_switch *sw,
367                                        struct list_head *list,
368                                        bool alloc_hopids)
369 {
370         struct tb *tb = sw->tb;
371         struct tb_port *port;
372
373         tb_switch_for_each_port(sw, port) {
374                 struct tb_tunnel *tunnel = NULL;
375
376                 switch (port->config.type) {
377                 case TB_TYPE_DP_HDMI_IN:
378                         tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
379                         tb_increase_tmu_accuracy(tunnel);
380                         break;
381
382                 case TB_TYPE_PCIE_DOWN:
383                         tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
384                         break;
385
386                 case TB_TYPE_USB3_DOWN:
387                         tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
388                         break;
389
390                 default:
391                         break;
392                 }
393
394                 if (tunnel)
395                         list_add_tail(&tunnel->list, list);
396         }
397
398         tb_switch_for_each_port(sw, port) {
399                 if (tb_port_has_remote(port)) {
400                         tb_switch_discover_tunnels(port->remote->sw, list,
401                                                    alloc_hopids);
402                 }
403         }
404 }
405
406 static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
407 {
408         if (tb_switch_is_usb4(port->sw))
409                 return usb4_port_configure_xdomain(port, xd);
410         return tb_lc_configure_xdomain(port);
411 }
412
413 static void tb_port_unconfigure_xdomain(struct tb_port *port)
414 {
415         if (tb_switch_is_usb4(port->sw))
416                 usb4_port_unconfigure_xdomain(port);
417         else
418                 tb_lc_unconfigure_xdomain(port);
419 }
420
421 static void tb_scan_xdomain(struct tb_port *port)
422 {
423         struct tb_switch *sw = port->sw;
424         struct tb *tb = sw->tb;
425         struct tb_xdomain *xd;
426         u64 route;
427
428         if (!tb_is_xdomain_enabled())
429                 return;
430
431         route = tb_downstream_route(port);
432         xd = tb_xdomain_find_by_route(tb, route);
433         if (xd) {
434                 tb_xdomain_put(xd);
435                 return;
436         }
437
438         xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
439                               NULL);
440         if (xd) {
441                 tb_port_at(route, sw)->xdomain = xd;
442                 tb_port_configure_xdomain(port, xd);
443                 tb_xdomain_add(xd);
444         }
445 }
446
447 /**
448  * tb_find_unused_port() - return the first inactive port on @sw
449  * @sw: Switch to find the port on
450  * @type: Port type to look for
451  */
452 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
453                                            enum tb_port_type type)
454 {
455         struct tb_port *port;
456
457         tb_switch_for_each_port(sw, port) {
458                 if (tb_is_upstream_port(port))
459                         continue;
460                 if (port->config.type != type)
461                         continue;
462                 if (!port->cap_adap)
463                         continue;
464                 if (tb_port_is_enabled(port))
465                         continue;
466                 return port;
467         }
468         return NULL;
469 }
470
471 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
472                                          const struct tb_port *port)
473 {
474         struct tb_port *down;
475
476         down = usb4_switch_map_usb3_down(sw, port);
477         if (down && !tb_usb3_port_is_enabled(down))
478                 return down;
479         return NULL;
480 }
481
482 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
483                                         struct tb_port *src_port,
484                                         struct tb_port *dst_port)
485 {
486         struct tb_cm *tcm = tb_priv(tb);
487         struct tb_tunnel *tunnel;
488
489         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
490                 if (tunnel->type == type &&
491                     ((src_port && src_port == tunnel->src_port) ||
492                      (dst_port && dst_port == tunnel->dst_port))) {
493                         return tunnel;
494                 }
495         }
496
497         return NULL;
498 }
499
500 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
501                                                    struct tb_port *src_port,
502                                                    struct tb_port *dst_port)
503 {
504         struct tb_port *port, *usb3_down;
505         struct tb_switch *sw;
506
507         /* Pick the router that is deepest in the topology */
508         if (tb_port_path_direction_downstream(src_port, dst_port))
509                 sw = dst_port->sw;
510         else
511                 sw = src_port->sw;
512
513         /* Can't be the host router */
514         if (sw == tb->root_switch)
515                 return NULL;
516
517         /* Find the downstream USB4 port that leads to this router */
518         port = tb_port_at(tb_route(sw), tb->root_switch);
519         /* Find the corresponding host router USB3 downstream port */
520         usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
521         if (!usb3_down)
522                 return NULL;
523
524         return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
525 }
526
527 /**
528  * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
529  * @tb: Domain structure
530  * @src_port: Source protocol adapter
531  * @dst_port: Destination protocol adapter
532  * @port: USB4 port the consumed bandwidth is calculated for
533  * @consumed_up: Consumed upstream bandwidth (Mb/s)
534  * @consumed_down: Consumed downstream bandwidth (Mb/s)
535  *
536  * Calculates consumed USB3 and PCIe bandwidth at @port along the path
537  * from @src_port to @dst_port. Does not take a USB3 tunnel starting from
538  * @src_port or ending at @dst_port into account because that bandwidth is
539  * already included as part of the "first hop" USB3 tunnel.
540  */
541 static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
542                                            struct tb_port *src_port,
543                                            struct tb_port *dst_port,
544                                            struct tb_port *port,
545                                            int *consumed_up,
546                                            int *consumed_down)
547 {
548         int pci_consumed_up, pci_consumed_down;
549         struct tb_tunnel *tunnel;
550
551         *consumed_up = *consumed_down = 0;
552
553         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
554         if (tunnel && !tb_port_is_usb3_down(src_port) &&
555             !tb_port_is_usb3_up(dst_port)) {
556                 int ret;
557
558                 ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
559                                                    consumed_down);
560                 if (ret)
561                         return ret;
562         }
563
564         /*
565          * If there is anything reserved for PCIe bulk traffic take it
566          * into account here too.
567          */
568         if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
569                 *consumed_up += pci_consumed_up;
570                 *consumed_down += pci_consumed_down;
571         }
572
573         return 0;
574 }
575
576 /**
577  * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
578  * @tb: Domain structure
579  * @src_port: Source protocol adapter
580  * @dst_port: Destination protocol adapter
581  * @port: USB4 port the consumed bandwidth is calculated for
582  * @consumed_up: Consumed upstream bandwidth (Mb/s)
583  * @consumed_down: Consumed downstream bandwidth (Mb/s)
584  *
585  * Calculates consumed DP bandwidth at @port along the path from @src_port
586  * to @dst_port. Does not take the tunnel starting from @src_port and ending
587  * at @dst_port into account.
588  *
589  * If there is bandwidth reserved for any of the groups between
590  * @src_port and @dst_port (but not yet used) that is also taken into
591  * account in the returned consumed bandwidth.
592  */
593 static int tb_consumed_dp_bandwidth(struct tb *tb,
594                                     struct tb_port *src_port,
595                                     struct tb_port *dst_port,
596                                     struct tb_port *port,
597                                     int *consumed_up,
598                                     int *consumed_down)
599 {
600         int group_reserved[MAX_GROUPS] = {};
601         struct tb_cm *tcm = tb_priv(tb);
602         struct tb_tunnel *tunnel;
603         bool downstream;
604         int i, ret;
605
606         *consumed_up = *consumed_down = 0;
607
608         /*
609          * Find all DP tunnels that cross the port and reduce
610          * their consumed bandwidth from the available.
611          */
612         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
613                 const struct tb_bandwidth_group *group;
614                 int dp_consumed_up, dp_consumed_down;
615
616                 if (tb_tunnel_is_invalid(tunnel))
617                         continue;
618
619                 if (!tb_tunnel_is_dp(tunnel))
620                         continue;
621
622                 if (!tb_tunnel_port_on_path(tunnel, port))
623                         continue;
624
625                 /*
626                  * Calculate what is reserved for groups crossing the
627                  * same ports only once (as that is reserved for all the
628                  * tunnels in the group).
629                  */
630                 group = tunnel->src_port->group;
631                 if (group && group->reserved && !group_reserved[group->index])
632                         group_reserved[group->index] = group->reserved;
633
634                 /*
635                  * Ignore the DP tunnel between src_port and dst_port
636                  * because it is the same tunnel and we may be
637                  * re-calculating estimated bandwidth.
638                  */
639                 if (tunnel->src_port == src_port &&
640                     tunnel->dst_port == dst_port)
641                         continue;
642
643                 ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
644                                                    &dp_consumed_down);
645                 if (ret)
646                         return ret;
647
648                 *consumed_up += dp_consumed_up;
649                 *consumed_down += dp_consumed_down;
650         }
651
652         downstream = tb_port_path_direction_downstream(src_port, dst_port);
653         for (i = 0; i < ARRAY_SIZE(group_reserved); i++) {
654                 if (downstream)
655                         *consumed_down += group_reserved[i];
656                 else
657                         *consumed_up += group_reserved[i];
658         }
659
660         return 0;
661 }
662
663 static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
664                               struct tb_port *port)
665 {
666         bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
667         enum tb_link_width width;
668
669         if (tb_is_upstream_port(port))
670                 width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
671         else
672                 width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;
673
674         return tb_port_width_supported(port, width);
675 }
676
677 /**
678  * tb_maximum_bandwidth() - Maximum bandwidth over a single link
679  * @tb: Domain structure
680  * @src_port: Source protocol adapter
681  * @dst_port: Destination protocol adapter
682  * @port: USB4 port the total bandwidth is calculated for
683  * @max_up: Maximum upstream bandwidth (Mb/s)
684  * @max_down: Maximum downstream bandwidth (Mb/s)
685  * @include_asym: Include bandwidth if the link is switched from
686  *                symmetric to asymmetric
687  *
688  * Returns maximum possible bandwidth in @max_up and @max_down over a
689  * single link at @port. If @include_asym is set then includes the
690  * additional banwdith if the links are transitioned into asymmetric to
691  * direction from @src_port to @dst_port.
692  */
693 static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
694                                 struct tb_port *dst_port, struct tb_port *port,
695                                 int *max_up, int *max_down, bool include_asym)
696 {
697         bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
698         int link_speed, link_width, up_bw, down_bw;
699
700         /*
701          * Can include asymmetric only if it is actually supported by
702          * the lane adapter.
703          */
704         if (!tb_asym_supported(src_port, dst_port, port))
705                 include_asym = false;
706
707         if (tb_is_upstream_port(port)) {
708                 link_speed = port->sw->link_speed;
709                 /*
710                  * sw->link_width is from upstream perspective so we use
711                  * the opposite for downstream of the host router.
712                  */
713                 if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
714                         up_bw = link_speed * 3 * 1000;
715                         down_bw = link_speed * 1 * 1000;
716                 } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
717                         up_bw = link_speed * 1 * 1000;
718                         down_bw = link_speed * 3 * 1000;
719                 } else if (include_asym) {
720                         /*
721                          * The link is symmetric at the moment but we
722                          * can switch it to asymmetric as needed. Report
723                          * this bandwidth as available (even though it
724                          * is not yet enabled).
725                          */
726                         if (downstream) {
727                                 up_bw = link_speed * 1 * 1000;
728                                 down_bw = link_speed * 3 * 1000;
729                         } else {
730                                 up_bw = link_speed * 3 * 1000;
731                                 down_bw = link_speed * 1 * 1000;
732                         }
733                 } else {
734                         up_bw = link_speed * port->sw->link_width * 1000;
735                         down_bw = up_bw;
736                 }
737         } else {
738                 link_speed = tb_port_get_link_speed(port);
739                 if (link_speed < 0)
740                         return link_speed;
741
742                 link_width = tb_port_get_link_width(port);
743                 if (link_width < 0)
744                         return link_width;
745
746                 if (link_width == TB_LINK_WIDTH_ASYM_TX) {
747                         up_bw = link_speed * 1 * 1000;
748                         down_bw = link_speed * 3 * 1000;
749                 } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
750                         up_bw = link_speed * 3 * 1000;
751                         down_bw = link_speed * 1 * 1000;
752                 } else if (include_asym) {
753                         /*
754                          * The link is symmetric at the moment but we
755                          * can switch it to asymmetric as needed. Report
756                          * this bandwidth as available (even though it
757                          * is not yet enabled).
758                          */
759                         if (downstream) {
760                                 up_bw = link_speed * 1 * 1000;
761                                 down_bw = link_speed * 3 * 1000;
762                         } else {
763                                 up_bw = link_speed * 3 * 1000;
764                                 down_bw = link_speed * 1 * 1000;
765                         }
766                 } else {
767                         up_bw = link_speed * link_width * 1000;
768                         down_bw = up_bw;
769                 }
770         }
771
772         /* Leave 10% guard band */
773         *max_up = up_bw - up_bw / 10;
774         *max_down = down_bw - down_bw / 10;
775
776         tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
777         return 0;
778 }
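/*
 * Rough example for a Gen 4 link where link_speed is 40: a symmetric
 * link gives 40 * 2 * 1000 = 80000 Mb/s in both directions, reported as
 * 72000 Mb/s after the 10% guard band, whereas an asymmetric (or
 * potentially asymmetric) link gives 120000/40000 Mb/s, reported as
 * 108000/36000 Mb/s.
 */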
779
780 /**
781  * tb_available_bandwidth() - Available bandwidth for tunneling
782  * @tb: Domain structure
783  * @src_port: Source protocol adapter
784  * @dst_port: Destination protocol adapter
785  * @available_up: Available bandwidth upstream (Mb/s)
786  * @available_down: Available bandwidth downstream (Mb/s)
787  * @include_asym: Include bandwidth if the link is switched from
788  *                symmetric to asymmetric
789  *
790  * Calculates maximum available bandwidth for protocol tunneling between
791  * @src_port and @dst_port at the moment. This is the minimum of the
792  * maximum link bandwidth across all links, reduced by the bandwidth
793  * currently consumed on each link.
794  *
795  * If @include_asym is true then this also includes the bandwidth that can
796  * be added when the links are transitioned into asymmetric (but does not
797  * transition the links).
798  */
799 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
800                                  struct tb_port *dst_port, int *available_up,
801                                  int *available_down, bool include_asym)
802 {
803         struct tb_port *port;
804         int ret;
805
806         /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
807         *available_up = *available_down = 120000;
808
809         /* Find the minimum available bandwidth over all links */
810         tb_for_each_port_on_path(src_port, dst_port, port) {
811                 int max_up, max_down, consumed_up, consumed_down;
812
813                 if (!tb_port_is_null(port))
814                         continue;
815
816                 ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
817                                            &max_up, &max_down, include_asym);
818                 if (ret)
819                         return ret;
820
821                 ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
822                                                       port, &consumed_up,
823                                                       &consumed_down);
824                 if (ret)
825                         return ret;
826                 max_up -= consumed_up;
827                 max_down -= consumed_down;
828
829                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
830                                                &consumed_up, &consumed_down);
831                 if (ret)
832                         return ret;
833                 max_up -= consumed_up;
834                 max_down -= consumed_down;
835
836                 if (max_up < *available_up)
837                         *available_up = max_up;
838                 if (max_down < *available_down)
839                         *available_down = max_down;
840         }
841
842         if (*available_up < 0)
843                 *available_up = 0;
844         if (*available_down < 0)
845                 *available_down = 0;
846
847         return 0;
848 }
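/*
 * For example, if the only USB4 link on the path is symmetric Gen 4
 * (72000 Mb/s per direction after the guard band) and it already carries
 * a DP tunnel consuming 20000 Mb/s downstream plus 1000 Mb/s reserved
 * for PCIe, roughly 72000 - 20000 - 1000 = 51000 Mb/s remains available
 * downstream (the upstream direction is reduced the same way by the
 * upstream consumption of those tunnels).
 */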
849
850 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
851                                             struct tb_port *src_port,
852                                             struct tb_port *dst_port)
853 {
854         struct tb_tunnel *tunnel;
855
856         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
857         return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
858 }
859
860 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
861                                       struct tb_port *dst_port)
862 {
863         int ret, available_up, available_down;
864         struct tb_tunnel *tunnel;
865
866         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
867         if (!tunnel)
868                 return;
869
870         tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n");
871
872         /*
873          * Calculate available bandwidth for the first hop USB3 tunnel.
874          * That determines the whole USB3 bandwidth for this branch.
875          */
876         ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
877                                      &available_up, &available_down, false);
878         if (ret) {
879                 tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n");
880                 return;
881         }
882
883         tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up,
884                       available_down);
885
886         tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
887 }
888
889 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
890 {
891         struct tb_switch *parent = tb_switch_parent(sw);
892         int ret, available_up, available_down;
893         struct tb_port *up, *down, *port;
894         struct tb_cm *tcm = tb_priv(tb);
895         struct tb_tunnel *tunnel;
896
897         if (!tb_acpi_may_tunnel_usb3()) {
898                 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
899                 return 0;
900         }
901
902         up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
903         if (!up)
904                 return 0;
905
906         if (!sw->link_usb4)
907                 return 0;
908
909         /*
910          * Look up available down port. Since we are chaining it should
911          * be found right above this switch.
912          */
913         port = tb_switch_downstream_port(sw);
914         down = tb_find_usb3_down(parent, port);
915         if (!down)
916                 return 0;
917
918         if (tb_route(parent)) {
919                 struct tb_port *parent_up;
920                 /*
921                  * Check first that the parent switch has its upstream USB3
922                  * port enabled. Otherwise the chain is not complete and
923                  * there is no point setting up a new tunnel.
924                  */
925                 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
926                 if (!parent_up || !tb_port_is_enabled(parent_up))
927                         return 0;
928
929                 /* Make all unused bandwidth available for the new tunnel */
930                 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
931                 if (ret)
932                         return ret;
933         }
934
935         ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
936                                      false);
937         if (ret)
938                 goto err_reclaim;
939
940         tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
941                     available_up, available_down);
942
943         tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
944                                       available_down);
945         if (!tunnel) {
946                 ret = -ENOMEM;
947                 goto err_reclaim;
948         }
949
950         if (tb_tunnel_activate(tunnel)) {
951                 tb_port_info(up,
952                              "USB3 tunnel activation failed, aborting\n");
953                 ret = -EIO;
954                 goto err_free;
955         }
956
957         list_add_tail(&tunnel->list, &tcm->tunnel_list);
958         if (tb_route(parent))
959                 tb_reclaim_usb3_bandwidth(tb, down, up);
960
961         return 0;
962
963 err_free:
964         tb_tunnel_free(tunnel);
965 err_reclaim:
966         if (tb_route(parent))
967                 tb_reclaim_usb3_bandwidth(tb, down, up);
968
969         return ret;
970 }
971
972 static int tb_create_usb3_tunnels(struct tb_switch *sw)
973 {
974         struct tb_port *port;
975         int ret;
976
977         if (!tb_acpi_may_tunnel_usb3())
978                 return 0;
979
980         if (tb_route(sw)) {
981                 ret = tb_tunnel_usb3(sw->tb, sw);
982                 if (ret)
983                         return ret;
984         }
985
986         tb_switch_for_each_port(sw, port) {
987                 if (!tb_port_has_remote(port))
988                         continue;
989                 ret = tb_create_usb3_tunnels(port->remote->sw);
990                 if (ret)
991                         return ret;
992         }
993
994         return 0;
995 }
996
997 /**
998  * tb_configure_asym() - Transition links to asymmetric if needed
999  * @tb: Domain structure
1000  * @src_port: Source adapter to start the transition
1001  * @dst_port: Destination adapter
1002  * @requested_up: Additional bandwidth (Mb/s) required upstream
1003  * @requested_down: Additional bandwidth (Mb/s) required downstream
1004  *
1005  * Transition links between @src_port and @dst_port into asymmetric, with
1006  * three lanes in the direction from @src_port towards @dst_port and one lane
1007  * in the opposite direction, if the bandwidth requirements
1008  * (requested + currently consumed) on that link exceed @asym_threshold.
1009  *
1010  * Must be called with available >= requested over all links.
1011  */
1012 static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
1013                              struct tb_port *dst_port, int requested_up,
1014                              int requested_down)
1015 {
1016         bool clx = false, clx_disabled = false, downstream;
1017         struct tb_switch *sw;
1018         struct tb_port *up;
1019         int ret = 0;
1020
1021         if (!asym_threshold)
1022                 return 0;
1023
1024         downstream = tb_port_path_direction_downstream(src_port, dst_port);
1025         /* Pick up router deepest in the hierarchy */
1026         if (downstream)
1027                 sw = dst_port->sw;
1028         else
1029                 sw = src_port->sw;
1030
1031         tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
1032                 struct tb_port *down = tb_switch_downstream_port(up->sw);
1033                 enum tb_link_width width_up, width_down;
1034                 int consumed_up, consumed_down;
1035
1036                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1037                                                &consumed_up, &consumed_down);
1038                 if (ret)
1039                         break;
1040
1041                 if (downstream) {
1042                         /*
1043                          * Downstream, so make sure the upstream direction stays
1044                          * within 36G (40G minus the 10% guard band), and that
1045                          * consumed + requested exceeds the threshold.
1046                          */
1047                         if (consumed_up + requested_up >= TB_ASYM_MIN) {
1048                                 ret = -ENOBUFS;
1049                                 break;
1050                         }
1051                         /* Does consumed + requested exceed the threshold */
1052                         if (consumed_down + requested_down < asym_threshold)
1053                                 continue;
1054
1055                         width_up = TB_LINK_WIDTH_ASYM_RX;
1056                         width_down = TB_LINK_WIDTH_ASYM_TX;
1057                 } else {
1058                         /* Upstream, the opposite of above */
1059                         if (consumed_down + requested_down >= TB_ASYM_MIN) {
1060                                 ret = -ENOBUFS;
1061                                 break;
1062                         }
1063                         if (consumed_up + requested_up < asym_threshold)
1064                                 continue;
1065
1066                         width_up = TB_LINK_WIDTH_ASYM_TX;
1067                         width_down = TB_LINK_WIDTH_ASYM_RX;
1068                 }
1069
1070                 if (up->sw->link_width == width_up)
1071                         continue;
1072
1073                 if (!tb_port_width_supported(up, width_up) ||
1074                     !tb_port_width_supported(down, width_down))
1075                         continue;
1076
1077                 /*
1078                  * Disable CL states before doing any transitions. We
1079                  * delayed it until now that we know there is a real
1080                  * transition taking place.
1081                  */
1082                 if (!clx_disabled) {
1083                         clx = tb_disable_clx(sw);
1084                         clx_disabled = true;
1085                 }
1086
1087                 tb_sw_dbg(up->sw, "configuring asymmetric link\n");
1088
1089                 /*
1090                  * Here requested + consumed > threshold so we need to
1091                  * transition the link into asymmetric now.
1092                  */
1093                 ret = tb_switch_set_link_width(up->sw, width_up);
1094                 if (ret) {
1095                         tb_sw_warn(up->sw, "failed to set link width\n");
1096                         break;
1097                 }
1098         }
1099
1100         /* Re-enable CL states if they were previously enabled */
1101         if (clx)
1102                 tb_enable_clx(sw);
1103
1104         return ret;
1105 }
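/*
 * With the default 45000 Mb/s threshold, for instance, a request that
 * pushes consumed + requested downstream bandwidth over a Gen 4 link
 * past 45G triggers the transition (as long as the upstream direction
 * stays below TB_ASYM_MIN), after which the link runs three lanes in
 * the downstream direction and one upstream.
 */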
1106
1107 /**
1108  * tb_configure_sym() - Transition links to symmetric if possible
1109  * @tb: Domain structure
1110  * @src_port: Source adapter to start the transition
1111  * @dst_port: Destination adapter
1112  * @keep_asym: Keep asymmetric link if preferred
1113  *
1114  * Goes over each link from @src_port to @dst_port and tries to
1115  * transition the link to symmetric if the currently consumed bandwidth
1116  * allows. The router's asymmetric link preference is ignored if @keep_asym is %false.
1117  */
1118 static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
1119                             struct tb_port *dst_port, bool keep_asym)
1120 {
1121         bool clx = false, clx_disabled = false, downstream;
1122         struct tb_switch *sw;
1123         struct tb_port *up;
1124         int ret = 0;
1125
1126         if (!asym_threshold)
1127                 return 0;
1128
1129         downstream = tb_port_path_direction_downstream(src_port, dst_port);
1130         /* Pick up router deepest in the hierarchy */
1131         if (downstream)
1132                 sw = dst_port->sw;
1133         else
1134                 sw = src_port->sw;
1135
1136         tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
1137                 int consumed_up, consumed_down;
1138
1139                 /* Already symmetric */
1140                 if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
1141                         continue;
1142                 /* Unplugged, no need to switch */
1143                 if (up->sw->is_unplugged)
1144                         continue;
1145
1146                 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1147                                                &consumed_up, &consumed_down);
1148                 if (ret)
1149                         break;
1150
1151                 if (downstream) {
1152                         /*
1153                          * Downstream, so we want consumed_down < threshold.
1154                          * Upstream traffic should be less than 36G (40G minus
1155                          * the 10% guard band) as the link was already
1156                          * configured asymmetric.
1157                          */
1158                         if (consumed_down >= asym_threshold)
1159                                 continue;
1160                 } else {
1161                         if (consumed_up >= asym_threshold)
1162                                 continue;
1163                 }
1164
1165                 if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
1166                         continue;
1167
1168                 /*
1169                  * Here consumed < threshold so we can transition the
1170                  * link to symmetric.
1171                  *
1172                  * However, if the router prefers asymmetric link we
1173                  * honor that (unless @keep_asym is %false).
1174                  */
1175                 if (keep_asym &&
1176                     up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) {
1177                         tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n");
1178                         continue;
1179                 }
1180
1181                 /* Disable CL states before doing any transitions */
1182                 if (!clx_disabled) {
1183                         clx = tb_disable_clx(sw);
1184                         clx_disabled = true;
1185                 }
1186
1187                 tb_sw_dbg(up->sw, "configuring symmetric link\n");
1188
1189                 ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
1190                 if (ret) {
1191                         tb_sw_warn(up->sw, "failed to set link width\n");
1192                         break;
1193                 }
1194         }
1195
1196         /* Re-enable CL states if they were previously enabled */
1197         if (clx)
1198                 tb_enable_clx(sw);
1199
1200         return ret;
1201 }
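/*
 * This is the counterpart of tb_configure_asym(): once consumed
 * bandwidth in the heavily used direction drops back below
 * asym_threshold (45000 Mb/s by default), and the router does not
 * prefer to stay asymmetric, the link is returned to the symmetric
 * dual-lane configuration.
 */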
1202
1203 static void tb_configure_link(struct tb_port *down, struct tb_port *up,
1204                               struct tb_switch *sw)
1205 {
1206         struct tb *tb = sw->tb;
1207
1208         /* Link the routers using both links if available */
1209         down->remote = up;
1210         up->remote = down;
1211         if (down->dual_link_port && up->dual_link_port) {
1212                 down->dual_link_port->remote = up->dual_link_port;
1213                 up->dual_link_port->remote = down->dual_link_port;
1214         }
1215
1216         /*
1217          * Enable lane bonding if the link is currently two single lane
1218          * links.
1219          */
1220         if (sw->link_width < TB_LINK_WIDTH_DUAL)
1221                 tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
1222
1223         /*
1224          * When a device router that comes up with a symmetric link is
1225          * connected deeper in the hierarchy, we transition the links
1226          * above it into symmetric if bandwidth allows.
1227          */
1228         if (tb_switch_depth(sw) > 1 &&
1229             tb_port_get_link_generation(up) >= 4 &&
1230             up->sw->link_width == TB_LINK_WIDTH_DUAL) {
1231                 struct tb_port *host_port;
1232
1233                 host_port = tb_port_at(tb_route(sw), tb->root_switch);
1234                 tb_configure_sym(tb, host_port, up, false);
1235         }
1236
1237         /* Set the link configured */
1238         tb_switch_configure_link(sw);
1239 }
1240
1241 static void tb_scan_port(struct tb_port *port);
1242
1243 /*
1244  * tb_scan_switch() - scan for and initialize downstream switches
1245  */
1246 static void tb_scan_switch(struct tb_switch *sw)
1247 {
1248         struct tb_port *port;
1249
1250         pm_runtime_get_sync(&sw->dev);
1251
1252         tb_switch_for_each_port(sw, port)
1253                 tb_scan_port(port);
1254
1255         pm_runtime_mark_last_busy(&sw->dev);
1256         pm_runtime_put_autosuspend(&sw->dev);
1257 }
1258
1259 /*
1260  * tb_scan_port() - check for and initialize switches below port
1261  */
1262 static void tb_scan_port(struct tb_port *port)
1263 {
1264         struct tb_cm *tcm = tb_priv(port->sw->tb);
1265         struct tb_port *upstream_port;
1266         bool discovery = false;
1267         struct tb_switch *sw;
1268
1269         if (tb_is_upstream_port(port))
1270                 return;
1271
1272         if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
1273             !tb_dp_port_is_enabled(port)) {
1274                 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
1275                 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
1276                                  false);
1277                 return;
1278         }
1279
1280         if (port->config.type != TB_TYPE_PORT)
1281                 return;
1282         if (port->dual_link_port && port->link_nr)
1283                 return; /*
1284                          * Downstream switch is reachable through two ports.
1285                          * Only scan on the primary port (link_nr == 0).
1286                          */
1287
1288         if (port->usb4)
1289                 pm_runtime_get_sync(&port->usb4->dev);
1290
1291         if (tb_wait_for_port(port, false) <= 0)
1292                 goto out_rpm_put;
1293         if (port->remote) {
1294                 tb_port_dbg(port, "port already has a remote\n");
1295                 goto out_rpm_put;
1296         }
1297
1298         tb_retimer_scan(port, true);
1299
1300         sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
1301                              tb_downstream_route(port));
1302         if (IS_ERR(sw)) {
1303                 /*
1304                  * If there is an error accessing the connected switch
1305                  * it may be connected to another domain. Also we allow
1306                  * the other domain to be connected to a max depth switch.
1307                  */
1308                 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
1309                         tb_scan_xdomain(port);
1310                 goto out_rpm_put;
1311         }
1312
1313         if (tb_switch_configure(sw)) {
1314                 tb_switch_put(sw);
1315                 goto out_rpm_put;
1316         }
1317
1318         /*
1319          * If there was previously another domain connected remove it
1320          * first.
1321          */
1322         if (port->xdomain) {
1323                 tb_xdomain_remove(port->xdomain);
1324                 tb_port_unconfigure_xdomain(port);
1325                 port->xdomain = NULL;
1326         }
1327
1328         /*
1329          * Do not send uevents until we have discovered all existing
1330          * tunnels and know which switches were authorized already by
1331          * the boot firmware.
1332          */
1333         if (!tcm->hotplug_active) {
1334                 dev_set_uevent_suppress(&sw->dev, true);
1335                 discovery = true;
1336         }
1337
1338         /*
1339          * At the moment we can support runtime PM only for Thunderbolt 2
1340          * and beyond (devices with LC).
1341          */
1342         sw->rpm = sw->generation > 1;
1343
1344         if (tb_switch_add(sw)) {
1345                 tb_switch_put(sw);
1346                 goto out_rpm_put;
1347         }
1348
1349         upstream_port = tb_upstream_port(sw);
1350         tb_configure_link(port, upstream_port, sw);
1351
1352         /*
1353          * CL0s and CL1 are enabled and supported together.
1354          * Silently ignore CLx enabling in case CLx is not supported.
1355          */
1356         if (discovery)
1357                 tb_sw_dbg(sw, "discovery, not touching CL states\n");
1358         else if (tb_enable_clx(sw))
1359                 tb_sw_warn(sw, "failed to enable CL states\n");
1360
1361         if (tb_enable_tmu(sw))
1362                 tb_sw_warn(sw, "failed to enable TMU\n");
1363
1364         /*
1365          * Configuration valid needs to be set after the TMU has been
1366          * enabled for the upstream port of the router so we do it here.
1367          */
1368         tb_switch_configuration_valid(sw);
1369
1370         /* Scan upstream retimers */
1371         tb_retimer_scan(upstream_port, true);
1372
1373         /*
1374          * Create USB 3.x tunnels only when the switch is plugged to the
1375          * domain. This is because we scan the domain also during discovery
1376          * and want to discover existing USB 3.x tunnels before we create
1377          * any new.
1378          */
1379         if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
1380                 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
1381
1382         tb_add_dp_resources(sw);
1383         tb_scan_switch(sw);
1384
1385 out_rpm_put:
1386         if (port->usb4) {
1387                 pm_runtime_mark_last_busy(&port->usb4->dev);
1388                 pm_runtime_put_autosuspend(&port->usb4->dev);
1389         }
1390 }
1391
1392 static void
1393 tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
1394 {
1395         struct tb_tunnel *first_tunnel;
1396         struct tb *tb = group->tb;
1397         struct tb_port *in;
1398         int ret;
1399
1400         tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
1401                group->index);
1402
1403         first_tunnel = NULL;
1404         list_for_each_entry(in, &group->ports, group_list) {
1405                 int estimated_bw, estimated_up, estimated_down;
1406                 struct tb_tunnel *tunnel;
1407                 struct tb_port *out;
1408
1409                 if (!usb4_dp_port_bandwidth_mode_enabled(in))
1410                         continue;
1411
1412                 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1413                 if (WARN_ON(!tunnel))
1414                         break;
1415
1416                 if (!first_tunnel) {
1417                         /*
1418                          * Since USB3 bandwidth is shared by all DP
1419                          * tunnels under the host router USB4 port, even
1420                          * if they do not begin from the host router, we
1421                          * can release USB3 bandwidth just once and not
1422                          * for each tunnel separately.
1423                          */
1424                         first_tunnel = tunnel;
1425                         ret = tb_release_unused_usb3_bandwidth(tb,
1426                                 first_tunnel->src_port, first_tunnel->dst_port);
1427                         if (ret) {
1428                                 tb_tunnel_warn(tunnel,
1429                                         "failed to release unused bandwidth\n");
1430                                 break;
1431                         }
1432                 }
1433
1434                 out = tunnel->dst_port;
1435                 ret = tb_available_bandwidth(tb, in, out, &estimated_up,
1436                                              &estimated_down, true);
1437                 if (ret) {
1438                         tb_tunnel_warn(tunnel,
1439                                 "failed to re-calculate estimated bandwidth\n");
1440                         break;
1441                 }
1442
1443                 /*
1444                  * Estimated bandwidth includes:
1445                  *  - already allocated bandwidth for the DP tunnel
1446                  *  - available bandwidth along the path
1447                  *  - bandwidth allocated for USB 3.x but not used.
1448                  */
1449                 if (tb_tunnel_direction_downstream(tunnel))
1450                         estimated_bw = estimated_down;
1451                 else
1452                         estimated_bw = estimated_up;
1453
1454                 /*
1455                  * If there is reserved bandwidth for the group that is
1456                  * not yet released we report that too.
1457                  */
1458                 tb_tunnel_dbg(tunnel,
1459                               "re-calculated estimated bandwidth %u (+ %u reserved) = %u Mb/s\n",
1460                               estimated_bw, group->reserved,
1461                               estimated_bw + group->reserved);
1462
1463                 if (usb4_dp_port_set_estimated_bandwidth(in,
1464                                 estimated_bw + group->reserved))
1465                         tb_tunnel_warn(tunnel,
1466                                        "failed to update estimated bandwidth\n");
1467         }
1468
1469         if (first_tunnel)
1470                 tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
1471                                           first_tunnel->dst_port);
1472
1473         tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
1474 }
1475
1476 static void tb_recalc_estimated_bandwidth(struct tb *tb)
1477 {
1478         struct tb_cm *tcm = tb_priv(tb);
1479         int i;
1480
1481         tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
1482
1483         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1484                 struct tb_bandwidth_group *group = &tcm->groups[i];
1485
1486                 if (!list_empty(&group->ports))
1487                         tb_recalc_estimated_bandwidth_for_group(group);
1488         }
1489
1490         tb_dbg(tb, "bandwidth re-calculation done\n");
1491 }
1492
1493 static bool __release_group_bandwidth(struct tb_bandwidth_group *group)
1494 {
1495         if (group->reserved) {
1496                 tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index,
1497                         group->reserved);
1498                 group->reserved = 0;
1499                 return true;
1500         }
1501         return false;
1502 }
1503
1504 static void __configure_group_sym(struct tb_bandwidth_group *group)
1505 {
1506         struct tb_tunnel *tunnel;
1507         struct tb_port *in;
1508
1509         if (list_empty(&group->ports))
1510                 return;
1511
1512         /*
1513          * All the tunnels in the group go through the same USB4 links
1514          * so we find the first one here and pass the IN and OUT
1515          * adapters to tb_configure_sym() which now transitions the
1516          * links back to symmetric if the bandwidth requirement is < asym_threshold.
1517          *
1518          * We do this here to avoid unnecessary transitions (for example
1519          * if the graphics released bandwidth for another tunnel in the
1520          * same group).
1521          */
1522         in = list_first_entry(&group->ports, struct tb_port, group_list);
1523         tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL);
1524         if (tunnel)
1525                 tb_configure_sym(group->tb, in, tunnel->dst_port, true);
1526 }
1527
1528 static void tb_bandwidth_group_release_work(struct work_struct *work)
1529 {
1530         struct tb_bandwidth_group *group =
1531                 container_of(work, typeof(*group), release_work.work);
1532         struct tb *tb = group->tb;
1533
1534         mutex_lock(&tb->lock);
1535         if (__release_group_bandwidth(group))
1536                 tb_recalc_estimated_bandwidth(tb);
1537         __configure_group_sym(group);
1538         mutex_unlock(&tb->lock);
1539 }
1540
1541 static void tb_init_bandwidth_groups(struct tb_cm *tcm)
1542 {
1543         int i;
1544
1545         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1546                 struct tb_bandwidth_group *group = &tcm->groups[i];
1547
1548                 group->tb = tcm_to_tb(tcm);
1549                 group->index = i + 1;
1550                 INIT_LIST_HEAD(&group->ports);
1551                 INIT_DELAYED_WORK(&group->release_work,
1552                                   tb_bandwidth_group_release_work);
1553         }
1554 }
1555
1556 static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
1557                                            struct tb_port *in)
1558 {
1559         if (!group || WARN_ON(in->group))
1560                 return;
1561
1562         in->group = group;
1563         list_add_tail(&in->group_list, &group->ports);
1564
1565         tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
1566 }
1567
1568 static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
1569 {
1570         int i;
1571
1572         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1573                 struct tb_bandwidth_group *group = &tcm->groups[i];
1574
1575                 if (list_empty(&group->ports))
1576                         return group;
1577         }
1578
1579         return NULL;
1580 }
1581
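/*
 * tb_attach_bandwidth_group() - Attach a DP IN adapter to a bandwidth group
 *
 * DP tunnels whose endpoints are on the same pair of routers traverse
 * the same USB4 links so they are placed into the same bandwidth group.
 * If no such tunnel exists yet the first free group is used instead.
 * Returns the group or NULL if all groups are already in use.
 */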
1582 static struct tb_bandwidth_group *
1583 tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
1584                           struct tb_port *out)
1585 {
1586         struct tb_bandwidth_group *group;
1587         struct tb_tunnel *tunnel;
1588
1589         /*
1590          * Find all DP tunnels that go through all the same USB4 links
1591          * as this one. Because we always set up tunnels the same way we
1592          * can just check for the routers at both ends of the tunnels
1593          * and if they are the same we have a match.
1594          */
1595         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1596                 if (!tb_tunnel_is_dp(tunnel))
1597                         continue;
1598
1599                 if (tunnel->src_port->sw == in->sw &&
1600                     tunnel->dst_port->sw == out->sw) {
1601                         group = tunnel->src_port->group;
1602                         if (group) {
1603                                 tb_bandwidth_group_attach_port(group, in);
1604                                 return group;
1605                         }
1606                 }
1607         }
1608
1609         /* Pick up next available group then */
1610         group = tb_find_free_bandwidth_group(tcm);
1611         if (group)
1612                 tb_bandwidth_group_attach_port(group, in);
1613         else
1614                 tb_port_warn(in, "no available bandwidth groups\n");
1615
1616         return group;
1617 }
1618
1619 static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
1620                                         struct tb_port *out)
1621 {
1622         if (usb4_dp_port_bandwidth_mode_enabled(in)) {
1623                 int index, i;
1624
1625                 index = usb4_dp_port_group_id(in);
1626                 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1627                         if (tcm->groups[i].index == index) {
1628                                 tb_bandwidth_group_attach_port(&tcm->groups[i], in);
1629                                 return;
1630                         }
1631                 }
1632         }
1633
1634         tb_attach_bandwidth_group(tcm, in, out);
1635 }
1636
1637 static void tb_detach_bandwidth_group(struct tb_port *in)
1638 {
1639         struct tb_bandwidth_group *group = in->group;
1640
1641         if (group) {
1642                 in->group = NULL;
1643                 list_del_init(&in->group_list);
1644
1645                 tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
1646
1647                 /* No more tunnels so release the reserved bandwidth if any */
1648                 if (list_empty(&group->ports)) {
1649                         cancel_delayed_work(&group->release_work);
1650                         __release_group_bandwidth(group);
1651                 }
1652         }
1653 }
1654
1655 static void tb_discover_tunnels(struct tb *tb)
1656 {
1657         struct tb_cm *tcm = tb_priv(tb);
1658         struct tb_tunnel *tunnel;
1659
1660         tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
1661
1662         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1663                 if (tb_tunnel_is_pci(tunnel)) {
1664                         struct tb_switch *parent = tunnel->dst_port->sw;
1665
1666                         while (parent != tunnel->src_port->sw) {
1667                                 parent->boot = true;
1668                                 parent = tb_switch_parent(parent);
1669                         }
1670                 } else if (tb_tunnel_is_dp(tunnel)) {
1671                         struct tb_port *in = tunnel->src_port;
1672                         struct tb_port *out = tunnel->dst_port;
1673
1674                         /* Keep the domain from powering down */
1675                         pm_runtime_get_sync(&in->sw->dev);
1676                         pm_runtime_get_sync(&out->sw->dev);
1677
1678                         tb_discover_bandwidth_group(tcm, in, out);
1679                 }
1680         }
1681 }
1682
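/*
 * tb_deactivate_and_free_tunnel() - Tear down a tunnel and release its resources
 *
 * Deactivates the tunnel, drops it from the domain tunnel list and
 * releases whatever the tunnel type holds: for DP the bandwidth group,
 * the DP IN resource and the runtime PM references taken on both ends,
 * and for both DP and USB3 the reserved USB3 bandwidth.
 */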
1683 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
1684 {
1685         struct tb_port *src_port, *dst_port;
1686         struct tb *tb;
1687
1688         if (!tunnel)
1689                 return;
1690
1691         tb_tunnel_deactivate(tunnel);
1692         list_del(&tunnel->list);
1693
1694         tb = tunnel->tb;
1695         src_port = tunnel->src_port;
1696         dst_port = tunnel->dst_port;
1697
1698         switch (tunnel->type) {
1699         case TB_TUNNEL_DP:
1700                 tb_detach_bandwidth_group(src_port);
1701                 /*
1702                  * In case of a DP tunnel, make sure the DP IN resource is
1703                  * deallocated properly.
1704                  */
1705                 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
1706                 /*
1707                  * If bandwidth on a link is < asym_threshold
1708                  * transition the link to symmetric.
1709                  */
1710                 tb_configure_sym(tb, src_port, dst_port, true);
1711                 /* Now we can allow the domain to runtime suspend again */
1712                 pm_runtime_mark_last_busy(&dst_port->sw->dev);
1713                 pm_runtime_put_autosuspend(&dst_port->sw->dev);
1714                 pm_runtime_mark_last_busy(&src_port->sw->dev);
1715                 pm_runtime_put_autosuspend(&src_port->sw->dev);
1716                 fallthrough;
1717
1718         case TB_TUNNEL_USB3:
1719                 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
1720                 break;
1721
1722         default:
1723                 /*
1724                  * PCIe and DMA tunnels do not consume guaranteed
1725                  * bandwidth.
1726                  */
1727                 break;
1728         }
1729
1730         tb_tunnel_free(tunnel);
1731 }
1732
1733 /*
1734  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
1735  */
1736 static void tb_free_invalid_tunnels(struct tb *tb)
1737 {
1738         struct tb_cm *tcm = tb_priv(tb);
1739         struct tb_tunnel *tunnel;
1740         struct tb_tunnel *n;
1741
1742         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1743                 if (tb_tunnel_is_invalid(tunnel))
1744                         tb_deactivate_and_free_tunnel(tunnel);
1745         }
1746 }
1747
1748 /*
1749  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
1750  */
1751 static void tb_free_unplugged_children(struct tb_switch *sw)
1752 {
1753         struct tb_port *port;
1754
1755         tb_switch_for_each_port(sw, port) {
1756                 if (!tb_port_has_remote(port))
1757                         continue;
1758
1759                 if (port->remote->sw->is_unplugged) {
1760                         tb_retimer_remove_all(port);
1761                         tb_remove_dp_resources(port->remote->sw);
1762                         tb_switch_unconfigure_link(port->remote->sw);
1763                         tb_switch_set_link_width(port->remote->sw,
1764                                                  TB_LINK_WIDTH_SINGLE);
1765                         tb_switch_remove(port->remote->sw);
1766                         port->remote = NULL;
1767                         if (port->dual_link_port)
1768                                 port->dual_link_port->remote = NULL;
1769                 } else {
1770                         tb_free_unplugged_children(port->remote->sw);
1771                 }
1772         }
1773 }
1774
1775 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
1776                                          const struct tb_port *port)
1777 {
1778         struct tb_port *down = NULL;
1779
1780         /*
1781          * To keep plugging devices consistently in the same PCIe
1782          * hierarchy, do mapping here for switch downstream PCIe ports.
1783          */
1784         if (tb_switch_is_usb4(sw)) {
1785                 down = usb4_switch_map_pcie_down(sw, port);
1786         } else if (!tb_route(sw)) {
1787                 int phy_port = tb_phy_port_from_link(port->port);
1788                 int index;
1789
1790                 /*
1791                  * Hard-coded Thunderbolt port to PCIe down port mapping
1792                  * per controller.
1793                  */
1794                 if (tb_switch_is_cactus_ridge(sw) ||
1795                     tb_switch_is_alpine_ridge(sw))
1796                         index = !phy_port ? 6 : 7;
1797                 else if (tb_switch_is_falcon_ridge(sw))
1798                         index = !phy_port ? 6 : 8;
1799                 else if (tb_switch_is_titan_ridge(sw))
1800                         index = !phy_port ? 8 : 9;
1801                 else
1802                         goto out;
1803
1804                 /* Validate the hard-coding */
1805                 if (WARN_ON(index > sw->config.max_port_number))
1806                         goto out;
1807
1808                 down = &sw->ports[index];
1809         }
1810
1811         if (down) {
1812                 if (WARN_ON(!tb_port_is_pcie_down(down)))
1813                         goto out;
1814                 if (tb_pci_port_is_enabled(down))
1815                         goto out;
1816
1817                 return down;
1818         }
1819
1820 out:
1821         return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
1822 }
1823
1824 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1825 {
1826         struct tb_port *host_port, *port;
1827         struct tb_cm *tcm = tb_priv(tb);
1828
1829         host_port = tb_route(in->sw) ?
1830                 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1831
1832         list_for_each_entry(port, &tcm->dp_resources, list) {
1833                 if (!tb_port_is_dpout(port))
1834                         continue;
1835
1836                 if (tb_port_is_enabled(port)) {
1837                         tb_port_dbg(port, "DP OUT in use\n");
1838                         continue;
1839                 }
1840
1841                 /* Needs to be on different routers */
1842                 if (in->sw == port->sw) {
1843                         tb_port_dbg(port, "skipping DP OUT on same router\n");
1844                         continue;
1845                 }
1846
1847                 tb_port_dbg(port, "DP OUT available\n");
1848
1849                 /*
1850                  * Keep the DP tunnel under the topology starting from
1851                  * the same host router downstream port.
1852                  */
1853                 if (host_port && tb_route(port->sw)) {
1854                         struct tb_port *p;
1855
1856                         p = tb_port_at(tb_route(port->sw), tb->root_switch);
1857                         if (p != host_port)
1858                                 continue;
1859                 }
1860
1861                 return port;
1862         }
1863
1864         return NULL;
1865 }
1866
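/*
 * tb_tunnel_one_dp() - Establish a DP tunnel between a DP IN/OUT pair
 *
 * Runtime resumes both ends, allocates the DP IN resource, attaches the
 * adapter to a bandwidth group, temporarily releases unused USB3
 * bandwidth for the calculation and then allocates and activates the
 * tunnel. On any failure the steps done so far are undone and false is
 * returned.
 */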
1867 static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
1868                              struct tb_port *out)
1869 {
1870         int available_up, available_down, ret, link_nr;
1871         struct tb_cm *tcm = tb_priv(tb);
1872         int consumed_up, consumed_down;
1873         struct tb_tunnel *tunnel;
1874
1875         /*
1876          * This is only applicable to links that are not bonded (so
1877          * when Thunderbolt 1 hardware is involved somewhere in the
1878          * topology). For these try to share the DP bandwidth between
1879          * the two lanes.
1880          */
1881         link_nr = 1;
1882         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1883                 if (tb_tunnel_is_dp(tunnel)) {
1884                         link_nr = 0;
1885                         break;
1886                 }
1887         }
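        /*
         * With the selection above the first DP tunnel gets link_nr 1
         * and any subsequent ones get 0, so on non-bonded links the DP
         * streams end up on different lanes.
         */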
1888
1889         /*
1890          * DP stream needs the domain to be active so runtime resume
1891          * both ends of the tunnel.
1892          *
1893          * This should also bring the routers in the middle to an active
1894          * state and keeps the domain from runtime suspending while the DP
1895          * tunnel is active.
1896          */
1897         pm_runtime_get_sync(&in->sw->dev);
1898         pm_runtime_get_sync(&out->sw->dev);
1899
1900         if (tb_switch_alloc_dp_resource(in->sw, in)) {
1901                 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
1902                 goto err_rpm_put;
1903         }
1904
1905         if (!tb_attach_bandwidth_group(tcm, in, out))
1906                 goto err_dealloc_dp;
1907
1908         /* Make all unused USB3 bandwidth available for the new DP tunnel */
1909         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1910         if (ret) {
1911                 tb_warn(tb, "failed to release unused bandwidth\n");
1912                 goto err_detach_group;
1913         }
1914
1915         ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
1916                                      true);
1917         if (ret)
1918                 goto err_reclaim_usb;
1919
1920         tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
1921                available_up, available_down);
1922
1923         tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
1924                                     available_down);
1925         if (!tunnel) {
1926                 tb_port_dbg(out, "could not allocate DP tunnel\n");
1927                 goto err_reclaim_usb;
1928         }
1929
1930         if (tb_tunnel_activate(tunnel)) {
1931                 tb_port_info(out, "DP tunnel activation failed, aborting\n");
1932                 goto err_free;
1933         }
1934
1935         /* If reading the tunnel's consumed bandwidth fails, tear it down */
1936         ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
1937         if (ret)
1938                 goto err_deactivate;
1939
1940         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1941
1942         tb_reclaim_usb3_bandwidth(tb, in, out);
1943         /*
1944          * Transition the links to asymmetric if the consumption exceeds
1945          * the threshold.
1946          */
1947         tb_configure_asym(tb, in, out, consumed_up, consumed_down);
1948
1949         /* Update the domain with the new bandwidth estimation */
1950         tb_recalc_estimated_bandwidth(tb);
1951
1952         /*
1953          * Now that a DP tunnel exists, change the TMU mode of the host
1954          * router's first-depth children to HiFi so that CL0s works.
1955          */
1956         tb_increase_tmu_accuracy(tunnel);
1957         return true;
1958
1959 err_deactivate:
1960         tb_tunnel_deactivate(tunnel);
1961 err_free:
1962         tb_tunnel_free(tunnel);
1963 err_reclaim_usb:
1964         tb_reclaim_usb3_bandwidth(tb, in, out);
1965 err_detach_group:
1966         tb_detach_bandwidth_group(in);
1967 err_dealloc_dp:
1968         tb_switch_dealloc_dp_resource(in->sw, in);
1969 err_rpm_put:
1970         pm_runtime_mark_last_busy(&out->sw->dev);
1971         pm_runtime_put_autosuspend(&out->sw->dev);
1972         pm_runtime_mark_last_busy(&in->sw->dev);
1973         pm_runtime_put_autosuspend(&in->sw->dev);
1974
1975         return false;
1976 }
1977
1978 static void tb_tunnel_dp(struct tb *tb)
1979 {
1980         struct tb_cm *tcm = tb_priv(tb);
1981         struct tb_port *port, *in, *out;
1982
1983         if (!tb_acpi_may_tunnel_dp()) {
1984                 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1985                 return;
1986         }
1987
1988         /*
1989          * Find pair of inactive DP IN and DP OUT adapters and then
1990          * establish a DP tunnel between them.
1991          */
1992         tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
1993
1994         in = NULL;
1995         out = NULL;
1996         list_for_each_entry(port, &tcm->dp_resources, list) {
1997                 if (!tb_port_is_dpin(port))
1998                         continue;
1999
2000                 if (tb_port_is_enabled(port)) {
2001                         tb_port_dbg(port, "DP IN in use\n");
2002                         continue;
2003                 }
2004
2005                 in = port;
2006                 tb_port_dbg(in, "DP IN available\n");
2007
2008                 out = tb_find_dp_out(tb, port);
2009                 if (out)
2010                         tb_tunnel_one_dp(tb, in, out);
2011                 else
2012                         tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
2013         }
2014
2015         if (!in)
2016                 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
2017 }
2018
2019 static void tb_enter_redrive(struct tb_port *port)
2020 {
2021         struct tb_switch *sw = port->sw;
2022
2023         if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
2024                 return;
2025
2026         /*
2027          * If we get hot-unplug for the DP IN port of the host router
2028          * and the DP resource is not available anymore it means there
2029          * is a monitor connected directly to the Type-C port and we are
2030          * in "redrive" mode. For this to work we cannot enter RTD3 so
2031          * we bump up the runtime PM reference count here.
2032          */
2033         if (!tb_port_is_dpin(port))
2034                 return;
2035         if (tb_route(sw))
2036                 return;
2037         if (!tb_switch_query_dp_resource(sw, port)) {
2038                 port->redrive = true;
2039                 pm_runtime_get(&sw->dev);
2040                 tb_port_dbg(port, "enter redrive mode, keeping powered\n");
2041         }
2042 }
2043
2044 static void tb_exit_redrive(struct tb_port *port)
2045 {
2046         struct tb_switch *sw = port->sw;
2047
2048         if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
2049                 return;
2050
2051         if (!tb_port_is_dpin(port))
2052                 return;
2053         if (tb_route(sw))
2054                 return;
2055         if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
2056                 port->redrive = false;
2057                 pm_runtime_put(&sw->dev);
2058                 tb_port_dbg(port, "exit redrive mode\n");
2059         }
2060 }
2061
2062 static void tb_switch_enter_redrive(struct tb_switch *sw)
2063 {
2064         struct tb_port *port;
2065
2066         tb_switch_for_each_port(sw, port)
2067                 tb_enter_redrive(port);
2068 }
2069
2070 /*
2071  * Called during system and runtime suspend to forcefully exit redrive
2072  * mode without querying whether the resource is available.
2073  */
2074 static void tb_switch_exit_redrive(struct tb_switch *sw)
2075 {
2076         struct tb_port *port;
2077
2078         if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
2079                 return;
2080
2081         tb_switch_for_each_port(sw, port) {
2082                 if (!tb_port_is_dpin(port))
2083                         continue;
2084
2085                 if (port->redrive) {
2086                         port->redrive = false;
2087                         pm_runtime_put(&sw->dev);
2088                         tb_port_dbg(port, "exit redrive mode\n");
2089                 }
2090         }
2091 }
2092
2093 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
2094 {
2095         struct tb_port *in, *out;
2096         struct tb_tunnel *tunnel;
2097
2098         if (tb_port_is_dpin(port)) {
2099                 tb_port_dbg(port, "DP IN resource unavailable\n");
2100                 in = port;
2101                 out = NULL;
2102         } else {
2103                 tb_port_dbg(port, "DP OUT resource unavailable\n");
2104                 in = NULL;
2105                 out = port;
2106         }
2107
2108         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
2109         if (tunnel)
2110                 tb_deactivate_and_free_tunnel(tunnel);
2111         else
2112                 tb_enter_redrive(port);
2113         list_del_init(&port->list);
2114
2115         /*
2116          * See if there is another DP OUT port that can be used to
2117          * create another tunnel.
2118          */
2119         tb_recalc_estimated_bandwidth(tb);
2120         tb_tunnel_dp(tb);
2121 }
2122
2123 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
2124 {
2125         struct tb_cm *tcm = tb_priv(tb);
2126         struct tb_port *p;
2127
2128         if (tb_port_is_enabled(port))
2129                 return;
2130
2131         list_for_each_entry(p, &tcm->dp_resources, list) {
2132                 if (p == port)
2133                         return;
2134         }
2135
2136         tb_port_dbg(port, "DP %s resource available after hotplug\n",
2137                     tb_port_is_dpin(port) ? "IN" : "OUT");
2138         list_add_tail(&port->list, &tcm->dp_resources);
2139         tb_exit_redrive(port);
2140
2141         /* Look for suitable DP IN <-> DP OUT pairs now */
2142         tb_tunnel_dp(tb);
2143 }
2144
2145 static void tb_disconnect_and_release_dp(struct tb *tb)
2146 {
2147         struct tb_cm *tcm = tb_priv(tb);
2148         struct tb_tunnel *tunnel, *n;
2149
2150         /*
2151          * Tear down all DP tunnels and release their resources. They
2152          * will be re-established after resume based on plug events.
2153          */
2154         list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
2155                 if (tb_tunnel_is_dp(tunnel))
2156                         tb_deactivate_and_free_tunnel(tunnel);
2157         }
2158
2159         while (!list_empty(&tcm->dp_resources)) {
2160                 struct tb_port *port;
2161
2162                 port = list_first_entry(&tcm->dp_resources,
2163                                         struct tb_port, list);
2164                 list_del_init(&port->list);
2165         }
2166 }
2167
2168 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
2169 {
2170         struct tb_tunnel *tunnel;
2171         struct tb_port *up;
2172
2173         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
2174         if (WARN_ON(!up))
2175                 return -ENODEV;
2176
2177         tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
2178         if (WARN_ON(!tunnel))
2179                 return -ENODEV;
2180
2181         tb_switch_xhci_disconnect(sw);
2182
2183         tb_tunnel_deactivate(tunnel);
2184         list_del(&tunnel->list);
2185         tb_tunnel_free(tunnel);
2186         return 0;
2187 }
2188
2189 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
2190 {
2191         struct tb_port *up, *down, *port;
2192         struct tb_cm *tcm = tb_priv(tb);
2193         struct tb_tunnel *tunnel;
2194
2195         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
2196         if (!up)
2197                 return 0;
2198
2199         /*
2200          * Look up an available down port. Since we are chaining, it should
2201          * be found right above this switch.
2202          */
2203         port = tb_switch_downstream_port(sw);
2204         down = tb_find_pcie_down(tb_switch_parent(sw), port);
2205         if (!down)
2206                 return 0;
2207
2208         tunnel = tb_tunnel_alloc_pci(tb, up, down);
2209         if (!tunnel)
2210                 return -ENOMEM;
2211
2212         if (tb_tunnel_activate(tunnel)) {
2213                 tb_port_info(up,
2214                              "PCIe tunnel activation failed, aborting\n");
2215                 tb_tunnel_free(tunnel);
2216                 return -EIO;
2217         }
2218
2219         /*
2220          * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
2221          * here.
2222          */
2223         if (tb_switch_pcie_l1_enable(sw))
2224                 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
2225
2226         if (tb_switch_xhci_connect(sw))
2227                 tb_sw_warn(sw, "failed to connect xHCI\n");
2228
2229         list_add_tail(&tunnel->list, &tcm->tunnel_list);
2230         return 0;
2231 }
2232
2233 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2234                                     int transmit_path, int transmit_ring,
2235                                     int receive_path, int receive_ring)
2236 {
2237         struct tb_cm *tcm = tb_priv(tb);
2238         struct tb_port *nhi_port, *dst_port;
2239         struct tb_tunnel *tunnel;
2240         struct tb_switch *sw;
2241         int ret;
2242
2243         sw = tb_to_switch(xd->dev.parent);
2244         dst_port = tb_port_at(xd->route, sw);
2245         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2246
2247         mutex_lock(&tb->lock);
2248
2249         /*
2250          * When tunneling DMA paths the link should not enter CL states
2251          * so disable them now.
2252          */
2253         tb_disable_clx(sw);
2254
2255         tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
2256                                      transmit_ring, receive_path, receive_ring);
2257         if (!tunnel) {
2258                 ret = -ENOMEM;
2259                 goto err_clx;
2260         }
2261
2262         if (tb_tunnel_activate(tunnel)) {
2263                 tb_port_info(nhi_port,
2264                              "DMA tunnel activation failed, aborting\n");
2265                 ret = -EIO;
2266                 goto err_free;
2267         }
2268
2269         list_add_tail(&tunnel->list, &tcm->tunnel_list);
2270         mutex_unlock(&tb->lock);
2271         return 0;
2272
2273 err_free:
2274         tb_tunnel_free(tunnel);
2275 err_clx:
2276         tb_enable_clx(sw);
2277         mutex_unlock(&tb->lock);
2278
2279         return ret;
2280 }
2281
2282 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2283                                           int transmit_path, int transmit_ring,
2284                                           int receive_path, int receive_ring)
2285 {
2286         struct tb_cm *tcm = tb_priv(tb);
2287         struct tb_port *nhi_port, *dst_port;
2288         struct tb_tunnel *tunnel, *n;
2289         struct tb_switch *sw;
2290
2291         sw = tb_to_switch(xd->dev.parent);
2292         dst_port = tb_port_at(xd->route, sw);
2293         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2294
2295         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2296                 if (!tb_tunnel_is_dma(tunnel))
2297                         continue;
2298                 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
2299                         continue;
2300
2301                 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
2302                                         receive_path, receive_ring))
2303                         tb_deactivate_and_free_tunnel(tunnel);
2304         }
2305
2306         /*
2307          * Try to re-enable CL states now; it is OK if this fails
2308          * because we may still have another DMA tunnel active through
2309          * the same host router USB4 downstream port.
2310          */
2311         tb_enable_clx(sw);
2312 }
2313
2314 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2315                                        int transmit_path, int transmit_ring,
2316                                        int receive_path, int receive_ring)
2317 {
2318         if (!xd->is_unplugged) {
2319                 mutex_lock(&tb->lock);
2320                 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
2321                                               transmit_ring, receive_path,
2322                                               receive_ring);
2323                 mutex_unlock(&tb->lock);
2324         }
2325         return 0;
2326 }
2327
2328 /* hotplug handling */
2329
2330 /*
2331  * tb_handle_hotplug() - handle hotplug event
2332  *
2333  * Executes on tb->wq.
2334  */
2335 static void tb_handle_hotplug(struct work_struct *work)
2336 {
2337         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
2338         struct tb *tb = ev->tb;
2339         struct tb_cm *tcm = tb_priv(tb);
2340         struct tb_switch *sw;
2341         struct tb_port *port;
2342
2343         /* Bring the domain back from sleep if it was suspended */
2344         pm_runtime_get_sync(&tb->dev);
2345
2346         mutex_lock(&tb->lock);
2347         if (!tcm->hotplug_active)
2348                 goto out; /* during init, suspend or shutdown */
2349
2350         sw = tb_switch_find_by_route(tb, ev->route);
2351         if (!sw) {
2352                 tb_warn(tb,
2353                         "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
2354                         ev->route, ev->port, ev->unplug);
2355                 goto out;
2356         }
2357         if (ev->port > sw->config.max_port_number) {
2358                 tb_warn(tb,
2359                         "hotplug event from non existent port %llx:%x (unplug: %d)\n",
2360                         ev->route, ev->port, ev->unplug);
2361                 goto put_sw;
2362         }
2363         port = &sw->ports[ev->port];
2364         if (tb_is_upstream_port(port)) {
2365                 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
2366                        ev->route, ev->port, ev->unplug);
2367                 goto put_sw;
2368         }
2369
2370         pm_runtime_get_sync(&sw->dev);
2371
2372         if (ev->unplug) {
2373                 tb_retimer_remove_all(port);
2374
2375                 if (tb_port_has_remote(port)) {
2376                         tb_port_dbg(port, "switch unplugged\n");
2377                         tb_sw_set_unplugged(port->remote->sw);
2378                         tb_free_invalid_tunnels(tb);
2379                         tb_remove_dp_resources(port->remote->sw);
2380                         tb_switch_tmu_disable(port->remote->sw);
2381                         tb_switch_unconfigure_link(port->remote->sw);
2382                         tb_switch_set_link_width(port->remote->sw,
2383                                                  TB_LINK_WIDTH_SINGLE);
2384                         tb_switch_remove(port->remote->sw);
2385                         port->remote = NULL;
2386                         if (port->dual_link_port)
2387                                 port->dual_link_port->remote = NULL;
2388                         /* Maybe we can create another DP tunnel */
2389                         tb_recalc_estimated_bandwidth(tb);
2390                         tb_tunnel_dp(tb);
2391                 } else if (port->xdomain) {
2392                         struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
2393
2394                         tb_port_dbg(port, "xdomain unplugged\n");
2395                         /*
2396                          * Service drivers are unbound during
2397                          * tb_xdomain_remove() so setting XDomain as
2398                          * unplugged here prevents deadlock if they call
2399                          * tb_xdomain_disable_paths(). We will tear down
2400                          * all the tunnels below.
2401                          */
2402                         xd->is_unplugged = true;
2403                         tb_xdomain_remove(xd);
2404                         port->xdomain = NULL;
2405                         __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
2406                         tb_xdomain_put(xd);
2407                         tb_port_unconfigure_xdomain(port);
2408                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2409                         tb_dp_resource_unavailable(tb, port);
2410                 } else if (!port->port) {
2411                         tb_sw_dbg(sw, "xHCI disconnect request\n");
2412                         tb_switch_xhci_disconnect(sw);
2413                 } else {
2414                         tb_port_dbg(port,
2415                                    "got unplug event for disconnected port, ignoring\n");
2416                 }
2417         } else if (port->remote) {
2418                 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
2419         } else if (!port->port && sw->authorized) {
2420                 tb_sw_dbg(sw, "xHCI connect request\n");
2421                 tb_switch_xhci_connect(sw);
2422         } else {
2423                 if (tb_port_is_null(port)) {
2424                         tb_port_dbg(port, "hotplug: scanning\n");
2425                         tb_scan_port(port);
2426                         if (!port->remote)
2427                                 tb_port_dbg(port, "hotplug: no switch found\n");
2428                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2429                         tb_dp_resource_available(tb, port);
2430                 }
2431         }
2432
2433         pm_runtime_mark_last_busy(&sw->dev);
2434         pm_runtime_put_autosuspend(&sw->dev);
2435
2436 put_sw:
2437         tb_switch_put(sw);
2438 out:
2439         mutex_unlock(&tb->lock);
2440
2441         pm_runtime_mark_last_busy(&tb->dev);
2442         pm_runtime_put_autosuspend(&tb->dev);
2443
2444         kfree(ev);
2445 }
2446
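/*
 * tb_alloc_dp_bandwidth() - Handle a DPTX bandwidth allocation request
 *
 * Compares the (granularity corrected) request against what is
 * currently allocated to the tunnel. If the request shrinks, the freed
 * bandwidth is kept reserved for the group for TB_RELEASE_BW_TIMEOUT
 * before it is given back to others. If the request grows, unused USB3
 * bandwidth is released and the request is satisfied from the available
 * plus reserved bandwidth, transitioning the links to asymmetric when
 * needed. On failure the current allocation is written back so the DPTX
 * side sees the request as failed.
 */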
2447 static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
2448                                  int *requested_down)
2449 {
2450         int allocated_up, allocated_down, available_up, available_down, ret;
2451         int requested_up_corrected, requested_down_corrected, granularity;
2452         int max_up, max_down, max_up_rounded, max_down_rounded;
2453         struct tb_bandwidth_group *group;
2454         struct tb *tb = tunnel->tb;
2455         struct tb_port *in, *out;
2456         bool downstream;
2457
2458         ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
2459         if (ret)
2460                 return ret;
2461
2462         in = tunnel->src_port;
2463         out = tunnel->dst_port;
2464
2465         tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n",
2466                       allocated_up, allocated_down);
2467
2468         /*
2469          * If we get a rounded-up request from the graphics side, say HBR2 x 4
2470          * that is 17500 instead of 17280 (this is because of the
2471          * granularity), we allow it too. Here the graphics has already
2472          * negotiated with the DPRX the maximum possible rates (which is
2473          * 17280 in this case).
2474          *
2475          * Since the link cannot go higher than 17280 we use that in our
2476          * calculations but the DP IN adapter Allocated BW write must be
2477          * the same value (17500) otherwise the adapter will mark it as
2478          * failed for graphics.
2479          */
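        /*
         * For example (granularity value assumed for illustration):
         * HBR2 x 4 is 4 lanes * 5.4 Gb/s * 8/10 encoding = 17280 Mb/s
         * and with a 250 Mb/s granularity roundup(17280, 250) = 17500
         * Mb/s, which is what the graphics side may request.
         */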
2480         ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
2481         if (ret)
2482                 goto fail;
2483
2484         ret = usb4_dp_port_granularity(in);
2485         if (ret < 0)
2486                 goto fail;
2487         granularity = ret;
2488
2489         max_up_rounded = roundup(max_up, granularity);
2490         max_down_rounded = roundup(max_down, granularity);
2491
2492         /*
2493          * This will "fix" the request down to the maximum supported
2494          * rate * lanes if the request is at the maximum rounded-up level.
2495          */
2496         requested_up_corrected = *requested_up;
2497         if (requested_up_corrected == max_up_rounded)
2498                 requested_up_corrected = max_up;
2499         else if (requested_up_corrected < 0)
2500                 requested_up_corrected = 0;
2501         requested_down_corrected = *requested_down;
2502         if (requested_down_corrected == max_down_rounded)
2503                 requested_down_corrected = max_down;
2504         else if (requested_down_corrected < 0)
2505                 requested_down_corrected = 0;
2506
2507         tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n",
2508                       requested_up_corrected, requested_down_corrected);
2509
2510         if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
2511             (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
2512                 tb_tunnel_dbg(tunnel,
2513                               "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
2514                               requested_up_corrected, requested_down_corrected,
2515                               max_up_rounded, max_down_rounded);
2516                 ret = -ENOBUFS;
2517                 goto fail;
2518         }
2519
2520         downstream = tb_tunnel_direction_downstream(tunnel);
2521         group = in->group;
2522
2523         if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
2524             (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
2525                 if (tunnel->bw_mode) {
2526                         int reserved;
2527                         /*
2528                          * If the requested bandwidth is less than or equal
2529                          * to what is currently allocated to the tunnel, we
2530                          * simply change the reservation of the tunnel and
2531                          * keep the released bandwidth reserved for the
2532                          * group for the next 10s. After that we release it
2533                          * for others to use.
2534                          */
2535                         if (downstream)
2536                                 reserved = allocated_down - *requested_down;
2537                         else
2538                                 reserved = allocated_up - *requested_up;
2539
2540                         if (reserved > 0) {
2541                                 group->reserved += reserved;
2542                                 tb_dbg(tb, "group %d reserved %d total %d Mb/s\n",
2543                                        group->index, reserved, group->reserved);
2544
2545                                 /*
2546                                  * If the release was not already pending,
2547                                  * schedule it now. If it was, postpone it
2548                                  * for another 10s (unless it is already
2549                                  * running, in which case the 10s already
2550                                  * expired and we should give the reserved
2551                                  * bandwidth back to others).
2552                                  */
2553                                 mod_delayed_work(system_wq, &group->release_work,
2554                                         msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));
2555                         }
2556                 }
2557
2558                 return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2559                                                  requested_down);
2560         }
2561
2562         /*
2563          * More bandwidth is requested. Release all the potential
2564          * bandwidth from USB3 first.
2565          */
2566         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
2567         if (ret)
2568                 goto fail;
2569
2570         /*
2571          * Then go over all tunnels that cross the same USB4 ports (they
2572          * are also in the same group but we use the same function here
2573          * that we use with the normal bandwidth allocation).
2574          */
2575         ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
2576                                      true);
2577         if (ret)
2578                 goto reclaim;
2579
2580         tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d (+ %u reserved) Mb/s\n",
2581                       available_up, available_down, group->reserved);
2582
2583         if ((*requested_up >= 0 &&
2584                 available_up + group->reserved >= requested_up_corrected) ||
2585             (*requested_down >= 0 &&
2586                 available_down + group->reserved >= requested_down_corrected)) {
2587                 int released = 0;
2588
2589                 /*
2590                  * If bandwidth on a link is >= asym_threshold
2591                  * transition the link to asymmetric.
2592                  */
2593                 ret = tb_configure_asym(tb, in, out, *requested_up,
2594                                         *requested_down);
2595                 if (ret) {
2596                         tb_configure_sym(tb, in, out, true);
2597                         goto fail;
2598                 }
2599
2600                 ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
2601                                                 requested_down);
2602                 if (ret) {
2603                         tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
2604                         tb_configure_sym(tb, in, out, true);
2605                 }
2606
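                /*
                 * Whatever was granted beyond the freely available
                 * bandwidth came out of the group reservation, so give
                 * that portion back by shrinking group->reserved below.
                 */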
2607                 if (downstream) {
2608                         if (*requested_down > available_down)
2609                                 released = *requested_down - available_down;
2610                 } else {
2611                         if (*requested_up > available_up)
2612                                 released = *requested_up - available_up;
2613                 }
2614                 if (released) {
2615                         group->reserved -= released;
2616                         tb_dbg(tb, "group %d released %d total %d Mb/s\n",
2617                                group->index, released, group->reserved);
2618                 }
2619         } else {
2620                 ret = -ENOBUFS;
2621         }
2622
2623 reclaim:
2624         tb_reclaim_usb3_bandwidth(tb, in, out);
2625 fail:
2626         if (ret && ret != -ENODEV) {
2627                 /*
2628                  * Write back the same allocated bandwidth (so no change);
2629                  * this makes the DPTX request fail on the graphics side.
2630                  */
2631                 tb_tunnel_dbg(tunnel,
2632                               "failing the request by rewriting allocated %d/%d Mb/s\n",
2633                               allocated_up, allocated_down);
2634                 tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
2635         }
2636
2637         return ret;
2638 }
2639
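/*
 * tb_handle_dp_bandwidth_request() - Handle a DP_BW notification
 *
 * Runs on tb->wq. Resolves the router and DP IN adapter from the event,
 * reads the requested bandwidth from the adapter and feeds it to
 * tb_alloc_dp_bandwidth() in the direction of the tunnel. Estimated
 * bandwidths of all groups are re-calculated when the allocation
 * changes or when the DPTX has just enabled the allocation mode.
 */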
2640 static void tb_handle_dp_bandwidth_request(struct work_struct *work)
2641 {
2642         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
2643         int requested_bw, requested_up, requested_down, ret;
2644         struct tb_tunnel *tunnel;
2645         struct tb *tb = ev->tb;
2646         struct tb_cm *tcm = tb_priv(tb);
2647         struct tb_switch *sw;
2648         struct tb_port *in;
2649
2650         pm_runtime_get_sync(&tb->dev);
2651
2652         mutex_lock(&tb->lock);
2653         if (!tcm->hotplug_active)
2654                 goto unlock;
2655
2656         sw = tb_switch_find_by_route(tb, ev->route);
2657         if (!sw) {
2658                 tb_warn(tb, "bandwidth request from non-existent router %llx\n",
2659                         ev->route);
2660                 goto unlock;
2661         }
2662
2663         in = &sw->ports[ev->port];
2664         if (!tb_port_is_dpin(in)) {
2665                 tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
2666                 goto put_sw;
2667         }
2668
2669         tb_port_dbg(in, "handling bandwidth allocation request\n");
2670
2671         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
2672         if (!tunnel) {
2673                 tb_port_warn(in, "failed to find tunnel\n");
2674                 goto put_sw;
2675         }
2676
2677         if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
2678                 if (tunnel->bw_mode) {
2679                         /*
2680                          * Reset the tunnel back to use the legacy
2681                          * allocation.
2682                          */
2683                         tunnel->bw_mode = false;
2684                         tb_port_dbg(in, "DPTX disabled bandwidth allocation mode\n");
2685                 } else {
2686                         tb_port_warn(in, "bandwidth allocation mode not enabled\n");
2687                 }
2688                 goto put_sw;
2689         }
2690
2691         ret = usb4_dp_port_requested_bandwidth(in);
2692         if (ret < 0) {
2693                 if (ret == -ENODATA) {
2694                         /*
2695                          * There is no active request so this means the
2696                          * bandwidth allocation mode was enabled from the
2697                          * graphics side. At this point we know that the
2698                          * graphics driver has read the DPRX capabilities
2699                          * so we can offer a better bandwidth estimation.
2700                          */
2701                         tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n");
2702                         tb_recalc_estimated_bandwidth(tb);
2703                 } else {
2704                         tb_port_warn(in, "failed to read requested bandwidth\n");
2705                 }
2706                 goto put_sw;
2707         }
2708         requested_bw = ret;
2709
2710         tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
2711
2712         if (tb_tunnel_direction_downstream(tunnel)) {
2713                 requested_up = -1;
2714                 requested_down = requested_bw;
2715         } else {
2716                 requested_up = requested_bw;
2717                 requested_down = -1;
2718         }
2719
2720         ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
2721         if (ret) {
2722                 if (ret == -ENOBUFS)
2723                         tb_tunnel_warn(tunnel,
2724                                        "not enough bandwidth available\n");
2725                 else
2726                         tb_tunnel_warn(tunnel,
2727                                        "failed to change bandwidth allocation\n");
2728         } else {
2729                 tb_tunnel_dbg(tunnel,
2730                               "bandwidth allocation changed to %d/%d Mb/s\n",
2731                               requested_up, requested_down);
2732
2733                 /* Update other clients about the allocation change */
2734                 tb_recalc_estimated_bandwidth(tb);
2735         }
2736
2737 put_sw:
2738         tb_switch_put(sw);
2739 unlock:
2740         mutex_unlock(&tb->lock);
2741
2742         pm_runtime_mark_last_busy(&tb->dev);
2743         pm_runtime_put_autosuspend(&tb->dev);
2744
2745         kfree(ev);
2746 }
2747
2748 static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
2749 {
2750         struct tb_hotplug_event *ev;
2751
2752         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
2753         if (!ev)
2754                 return;
2755
2756         ev->tb = tb;
2757         ev->route = route;
2758         ev->port = port;
2759         INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
2760         queue_work(tb->wq, &ev->work);
2761 }
2762
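/*
 * tb_handle_notification() - Handle router notification packets
 *
 * Acks the notifications we care about and queues DP bandwidth
 * allocation requests to be handled in tb_handle_dp_bandwidth_request().
 */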
2763 static void tb_handle_notification(struct tb *tb, u64 route,
2764                                    const struct cfg_error_pkg *error)
2765 {
2766
2767         switch (error->error) {
2768         case TB_CFG_ERROR_PCIE_WAKE:
2769         case TB_CFG_ERROR_DP_CON_CHANGE:
2770         case TB_CFG_ERROR_DPTX_DISCOVERY:
2771                 if (tb_cfg_ack_notification(tb->ctl, route, error))
2772                         tb_warn(tb, "could not ack notification on %llx\n",
2773                                 route);
2774                 break;
2775
2776         case TB_CFG_ERROR_DP_BW:
2777                 if (tb_cfg_ack_notification(tb->ctl, route, error))
2778                         tb_warn(tb, "could not ack notification on %llx\n",
2779                                 route);
2780                 tb_queue_dp_bandwidth_request(tb, route, error->port);
2781                 break;
2782
2783         default:
2784                 /* Ignore for now */
2785                 break;
2786         }
2787 }
2788
2789 /*
2790  * tb_handle_event() - callback function for the control channel
2791  *
2792  * Delegates to tb_handle_hotplug.
2793  */
2794 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
2795                             const void *buf, size_t size)
2796 {
2797         const struct cfg_event_pkg *pkg = buf;
2798         u64 route = tb_cfg_get_route(&pkg->header);
2799
2800         switch (type) {
2801         case TB_CFG_PKG_ERROR:
2802                 tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
2803                 return;
2804         case TB_CFG_PKG_EVENT:
2805                 break;
2806         default:
2807                 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
2808                 return;
2809         }
2810
2811         if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
2812                 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
2813                         pkg->port);
2814         }
2815
2816         tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
2817 }
2818
2819 static void tb_stop(struct tb *tb)
2820 {
2821         struct tb_cm *tcm = tb_priv(tb);
2822         struct tb_tunnel *tunnel;
2823         struct tb_tunnel *n;
2824
2825         cancel_delayed_work(&tcm->remove_work);
2826         /* tunnels are only present after everything has been initialized */
2827         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2828                 /*
2829                  * DMA tunnels require the driver to be functional so we
2830                  * tear them down. Other protocol tunnels can be left
2831                  * intact.
2832                  */
2833                 if (tb_tunnel_is_dma(tunnel))
2834                         tb_tunnel_deactivate(tunnel);
2835                 tb_tunnel_free(tunnel);
2836         }
2837         tb_switch_remove(tb->root_switch);
2838         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2839 }
2840
2841 static void tb_deinit(struct tb *tb)
2842 {
2843         struct tb_cm *tcm = tb_priv(tb);
2844         int i;
2845
2846         /* Cancel all the release bandwidth workers */
2847         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++)
2848                 cancel_delayed_work_sync(&tcm->groups[i].release_work);
2849 }
2850
2851 static int tb_scan_finalize_switch(struct device *dev, void *data)
2852 {
2853         if (tb_is_switch(dev)) {
2854                 struct tb_switch *sw = tb_to_switch(dev);
2855
2856                 /*
2857                  * If we found that the switch was already set up by the
2858                  * boot firmware, mark it as authorized now before we
2859                  * send the uevent to userspace.
2860                  */
2861                 if (sw->boot)
2862                         sw->authorized = 1;
2863
2864                 dev_set_uevent_suppress(dev, false);
2865                 kobject_uevent(&dev->kobj, KOBJ_ADD);
2866                 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
2867         }
2868
2869         return 0;
2870 }
2871
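     /*
      * tb_start() - Bring up the domain using the software connection manager
      *
      * Allocates and configures the root switch, discovers (or, when
      * @reset is set on USB4 hosts, skips and tears down) the tunnels
      * created by the boot firmware, creates USB 3.x tunnels and
      * finally enables hotplug event processing.
      */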
2872 static int tb_start(struct tb *tb, bool reset)
2873 {
2874         struct tb_cm *tcm = tb_priv(tb);
2875         bool discover = true;
2876         int ret;
2877
2878         tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
2879         if (IS_ERR(tb->root_switch))
2880                 return PTR_ERR(tb->root_switch);
2881
2882         /*
2883          * ICM firmware upgrade needs the ICM firmware to be running,
2884          * which is not the case in native mode, so disable firmware
2885          * upgrade of the root switch.
2886          *
2887          * However, USB4 routers support NVM firmware upgrade if they
2888          * implement the necessary router operations.
2889          */
2890         tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
2891         /* All USB4 routers support runtime PM */
2892         tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
2893
2894         ret = tb_switch_configure(tb->root_switch);
2895         if (ret) {
2896                 tb_switch_put(tb->root_switch);
2897                 return ret;
2898         }
2899
2900         /* Announce the switch to the world */
2901         ret = tb_switch_add(tb->root_switch);
2902         if (ret) {
2903                 tb_switch_put(tb->root_switch);
2904                 return ret;
2905         }
2906
2907         /*
2908          * To support the highest CLx state, set the host router's TMU
2909          * to normal (low-resolution) mode.
2910          */
2911         tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
2912         /* Enable TMU if it is off */
2913         tb_switch_tmu_enable(tb->root_switch);
2914
2915         /*
2916          * Boot firmware might have created tunnels of its own. Since we
2917          * cannot be sure they are usable for us, tear them down and
2918          * reset the ports so they are handled as new hotplugs on USB4
2919          * v1 routers (for USB4 v2 and beyond the host reset is already done).
2920          */
2921         if (reset && tb_switch_is_usb4(tb->root_switch)) {
2922                 discover = false;
2923                 if (usb4_switch_version(tb->root_switch) == 1)
2924                         tb_switch_reset(tb->root_switch);
2925         }
2926
2927         if (discover) {
2928                 /* Full scan to discover devices added before the driver was loaded. */
2929                 tb_scan_switch(tb->root_switch);
2930                 /* Find out tunnels created by the boot firmware */
2931                 tb_discover_tunnels(tb);
2932                 /* Add DP resources from the DP tunnels created by the boot firmware */
2933                 tb_discover_dp_resources(tb);
2934         }
2935
2936         /*
2937          * If the boot firmware did not create USB 3.x tunnels, create
2938          * them now for the whole topology.
2939          */
2940         tb_create_usb3_tunnels(tb->root_switch);
2941         /* Add DP IN resources for the root switch */
2942         tb_add_dp_resources(tb->root_switch);
2943         tb_switch_enter_redrive(tb->root_switch);
2944         /* Make the discovered switches available to the userspace */
2945         device_for_each_child(&tb->root_switch->dev, NULL,
2946                               tb_scan_finalize_switch);
2947
2948         /* Allow tb_handle_hotplug to progress events */
2949         tcm->hotplug_active = true;
2950         return 0;
2951 }
2952
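     /*
      * tb_suspend_noirq() - System suspend, noirq phase
      *
      * Releases DP tunnels, exits redrive mode and puts the topology to
      * sleep. Hotplug processing stays disabled until resume.
      */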
2953 static int tb_suspend_noirq(struct tb *tb)
2954 {
2955         struct tb_cm *tcm = tb_priv(tb);
2956
2957         tb_dbg(tb, "suspending...\n");
2958         tb_disconnect_and_release_dp(tb);
2959         tb_switch_exit_redrive(tb->root_switch);
2960         tb_switch_suspend(tb->root_switch, false);
2961         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
2962         tb_dbg(tb, "suspend finished\n");
2963
2964         return 0;
2965 }
2966
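     /*
      * tb_restore_children() - Restore router configuration after resume
      *
      * Re-enables CL states and TMU, restores link width and link
      * configuration, and recurses into all connected child routers and
      * XDomain connections.
      */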
2967 static void tb_restore_children(struct tb_switch *sw)
2968 {
2969         struct tb_port *port;
2970
2971         /* No need to restore if the router is already unplugged */
2972         if (sw->is_unplugged)
2973                 return;
2974
2975         if (tb_enable_clx(sw))
2976                 tb_sw_warn(sw, "failed to re-enable CL states\n");
2977
2978         if (tb_enable_tmu(sw))
2979                 tb_sw_warn(sw, "failed to restore TMU configuration\n");
2980
2981         tb_switch_configuration_valid(sw);
2982
2983         tb_switch_for_each_port(sw, port) {
2984                 if (!tb_port_has_remote(port) && !port->xdomain)
2985                         continue;
2986
2987                 if (port->remote) {
2988                         tb_switch_set_link_width(port->remote->sw,
2989                                                  port->remote->sw->link_width);
2990                         tb_switch_configure_link(port->remote->sw);
2991
2992                         tb_restore_children(port->remote->sw);
2993                 } else if (port->xdomain) {
2994                         tb_port_configure_xdomain(port, port->xdomain);
2995                 }
2996         }
2997 }
2998
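     /*
      * tb_resume_noirq() - System resume, noirq phase
      *
      * Resumes the topology, drops tunnels that are no longer valid,
      * tears down tunnels created by the boot firmware or the restore
      * kernel, and restarts the tunnels tracked by the connection
      * manager before hotplug processing is allowed again.
      */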
2999 static int tb_resume_noirq(struct tb *tb)
3000 {
3001         struct tb_cm *tcm = tb_priv(tb);
3002         struct tb_tunnel *tunnel, *n;
3003         unsigned int usb3_delay = 0;
3004         LIST_HEAD(tunnels);
3005
3006         tb_dbg(tb, "resuming...\n");
3007
3008         /*
3009          * For non-USB4 hosts (Apple systems), remove any PCIe devices
3010          * the firmware might have set up.
3011          */
3012         if (!tb_switch_is_usb4(tb->root_switch))
3013                 tb_switch_reset(tb->root_switch);
3014
3015         tb_switch_resume(tb->root_switch, false);
3016         tb_free_invalid_tunnels(tb);
3017         tb_free_unplugged_children(tb->root_switch);
3018         tb_restore_children(tb->root_switch);
3019
3020         /*
3021          * If we get here from suspend to disk, the boot firmware or
3022          * the restore kernel might have created tunnels of its own.
3023          * Since we cannot be sure they are usable for us, find and
3024          * tear them down.
3025          */
3026         tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
3027         list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
3028                 if (tb_tunnel_is_usb3(tunnel))
3029                         usb3_delay = 500;
3030                 tb_tunnel_deactivate(tunnel);
3031                 tb_tunnel_free(tunnel);
3032         }
3033
3034         /* Re-create our tunnels now */
3035         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
3036                 /* USB3 requires a delay before it can be re-activated */
3037                 if (tb_tunnel_is_usb3(tunnel)) {
3038                         msleep(usb3_delay);
3039                         /* Only need to do it once */
3040                         usb3_delay = 0;
3041                 }
3042                 tb_tunnel_restart(tunnel);
3043         }
3044         if (!list_empty(&tcm->tunnel_list)) {
3045                 /*
3046                  * The PCIe links need some time to come up again;
3047                  * 100 ms has proven to be enough in practice.
3048                  */
3049                 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
3050                 msleep(100);
3051         }
3052         tb_switch_enter_redrive(tb->root_switch);
3053         /* Allow tb_handle_hotplug to progress events */
3054         tcm->hotplug_active = true;
3055         tb_dbg(tb, "resume finished\n");
3056
3057         return 0;
3058 }
3059
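     /*
      * tb_free_unplugged_xdomains() - Remove unplugged XDomain connections
      *
      * Walks the topology and removes every XDomain that has been marked
      * unplugged. Returns the number of XDomains removed.
      */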
3060 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
3061 {
3062         struct tb_port *port;
3063         int ret = 0;
3064
3065         tb_switch_for_each_port(sw, port) {
3066                 if (tb_is_upstream_port(port))
3067                         continue;
3068                 if (port->xdomain && port->xdomain->is_unplugged) {
3069                         tb_retimer_remove_all(port);
3070                         tb_xdomain_remove(port->xdomain);
3071                         tb_port_unconfigure_xdomain(port);
3072                         port->xdomain = NULL;
3073                         ret++;
3074                 } else if (port->remote) {
3075                         ret += tb_free_unplugged_xdomains(port->remote->sw);
3076                 }
3077         }
3078
3079         return ret;
3080 }
3081
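     /*
      * Hibernation freeze/thaw only pause and resume hotplug event
      * processing; the hardware is not touched here.
      */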
3082 static int tb_freeze_noirq(struct tb *tb)
3083 {
3084         struct tb_cm *tcm = tb_priv(tb);
3085
3086         tcm->hotplug_active = false;
3087         return 0;
3088 }
3089
3090 static int tb_thaw_noirq(struct tb *tb)
3091 {
3092         struct tb_cm *tcm = tb_priv(tb);
3093
3094         tcm->hotplug_active = true;
3095         return 0;
3096 }
3097
3098 static void tb_complete(struct tb *tb)
3099 {
3100         /*
3101          * Release any unplugged XDomains. If another domain has been
3102          * swapped in place of an unplugged XDomain, run another rescan
3103          * to pick it up.
3104          */
3105         mutex_lock(&tb->lock);
3106         if (tb_free_unplugged_xdomains(tb->root_switch))
3107                 tb_scan_switch(tb->root_switch);
3108         mutex_unlock(&tb->lock);
3109 }
3110
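     /*
      * tb_runtime_suspend() - Runtime suspend the domain
      *
      * Releases DP resources, exits redrive mode and runtime suspends
      * the topology. Hotplug events are ignored until runtime resume.
      */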
3111 static int tb_runtime_suspend(struct tb *tb)
3112 {
3113         struct tb_cm *tcm = tb_priv(tb);
3114
3115         mutex_lock(&tb->lock);
3116         /*
3117          * The call below only releases the DP resources so that redrive
3118          * mode can be exited and re-entered.
3119          */
3120         tb_disconnect_and_release_dp(tb);
3121         tb_switch_exit_redrive(tb->root_switch);
3122         tb_switch_suspend(tb->root_switch, true);
3123         tcm->hotplug_active = false;
3124         mutex_unlock(&tb->lock);
3125
3126         return 0;
3127 }
3128
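     /*
      * tb_remove_work() - Deferred cleanup after runtime resume
      *
      * Removes routers and XDomains that were unplugged while the domain
      * was runtime suspended. Scheduled from tb_runtime_resume() on the
      * domain workqueue to avoid deadlocks with runtime PM.
      */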
3129 static void tb_remove_work(struct work_struct *work)
3130 {
3131         struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
3132         struct tb *tb = tcm_to_tb(tcm);
3133
3134         mutex_lock(&tb->lock);
3135         if (tb->root_switch) {
3136                 tb_free_unplugged_children(tb->root_switch);
3137                 tb_free_unplugged_xdomains(tb->root_switch);
3138         }
3139         mutex_unlock(&tb->lock);
3140 }
3141
3142 static int tb_runtime_resume(struct tb *tb)
3143 {
3144         struct tb_cm *tcm = tb_priv(tb);
3145         struct tb_tunnel *tunnel, *n;
3146
3147         mutex_lock(&tb->lock);
3148         tb_switch_resume(tb->root_switch, true);
3149         tb_free_invalid_tunnels(tb);
3150         tb_restore_children(tb->root_switch);
3151         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
3152                 tb_tunnel_restart(tunnel);
3153         tb_switch_enter_redrive(tb->root_switch);
3154         tcm->hotplug_active = true;
3155         mutex_unlock(&tb->lock);
3156
3157         /*
3158          * Schedule cleanup of any unplugged devices. Run this in a
3159          * separate thread to avoid possible deadlock if the device
3160          * removal runtime resumes the unplugged device.
3161          */
3162         queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
3163         return 0;
3164 }
3165
3166 static const struct tb_cm_ops tb_cm_ops = {
3167         .start = tb_start,
3168         .stop = tb_stop,
3169         .deinit = tb_deinit,
3170         .suspend_noirq = tb_suspend_noirq,
3171         .resume_noirq = tb_resume_noirq,
3172         .freeze_noirq = tb_freeze_noirq,
3173         .thaw_noirq = tb_thaw_noirq,
3174         .complete = tb_complete,
3175         .runtime_suspend = tb_runtime_suspend,
3176         .runtime_resume = tb_runtime_resume,
3177         .handle_event = tb_handle_event,
3178         .disapprove_switch = tb_disconnect_pci,
3179         .approve_switch = tb_tunnel_pci,
3180         .approve_xdomain_paths = tb_approve_xdomain_paths,
3181         .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
3182 };
3183
3184 /*
3185  * During suspend the Thunderbolt controller is reset and all PCIe
3186  * tunnels are lost. The NHI driver will try to reestablish all tunnels
3187  * during resume. This adds device links between the tunneled PCIe
3188  * downstream ports and the NHI so that the device core makes sure
3189  * the NHI is resumed before the tunneled downstream ports.
3190  */
3191 static bool tb_apple_add_links(struct tb_nhi *nhi)
3192 {
3193         struct pci_dev *upstream, *pdev;
3194         bool ret;
3195
3196         if (!x86_apple_machine)
3197                 return false;
3198
3199         switch (nhi->pdev->device) {
3200         case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
3201         case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
3202         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
3203         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
3204                 break;
3205         default:
3206                 return false;
3207         }
3208
3209         upstream = pci_upstream_bridge(nhi->pdev);
3210         while (upstream) {
3211                 if (!pci_is_pcie(upstream))
3212                         return false;
3213                 if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
3214                         break;
3215                 upstream = pci_upstream_bridge(upstream);
3216         }
3217
3218         if (!upstream)
3219                 return false;
3220
3221         /*
3222          * For each hotplug downstream port, add a device link back to
3223          * the NHI so that PCIe tunnels can be re-established after
3224          * sleep.
3225          */
3226         ret = false;
3227         for_each_pci_bridge(pdev, upstream->subordinate) {
3228                 const struct device_link *link;
3229
3230                 if (!pci_is_pcie(pdev))
3231                         continue;
3232                 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
3233                     !pdev->is_hotplug_bridge)
3234                         continue;
3235
3236                 link = device_link_add(&pdev->dev, &nhi->pdev->dev,
3237                                        DL_FLAG_AUTOREMOVE_SUPPLIER |
3238                                        DL_FLAG_PM_RUNTIME);
3239                 if (link) {
3240                         dev_dbg(&nhi->pdev->dev, "created link from %s\n",
3241                                 dev_name(&pdev->dev));
3242                         ret = true;
3243                 } else {
3244                         dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
3245                                  dev_name(&pdev->dev));
3246                 }
3247         }
3248
3249         return ret;
3250 }
3251
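     /*
      * tb_probe() - Set up a domain driven by the software connection manager
      *
      * Allocates the domain, selects the security level based on whether
      * ACPI allows PCIe tunneling, hooks up the connection manager
      * operations and initializes the bandwidth groups. Returns the new
      * domain or NULL on failure.
      */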
3252 struct tb *tb_probe(struct tb_nhi *nhi)
3253 {
3254         struct tb_cm *tcm;
3255         struct tb *tb;
3256
3257         tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
3258         if (!tb)
3259                 return NULL;
3260
3261         if (tb_acpi_may_tunnel_pcie())
3262                 tb->security_level = TB_SECURITY_USER;
3263         else
3264                 tb->security_level = TB_SECURITY_NOPCIE;
3265
3266         tb->cm_ops = &tb_cm_ops;
3267
3268         tcm = tb_priv(tb);
3269         INIT_LIST_HEAD(&tcm->tunnel_list);
3270         INIT_LIST_HEAD(&tcm->dp_resources);
3271         INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
3272         tb_init_bandwidth_groups(tcm);
3273
3274         tb_dbg(tb, "using software connection manager\n");
3275
3276         /*
3277          * Device links are needed to make sure we establish tunnels
3278          * before the PCIe/USB stack is resumed, so complain here if
3279          * they are missing.
3280          */
3281         if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
3282                 tb_warn(tb, "device links to tunneled native ports are missing!\n");
3283
3284         return tb;
3285 }