// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit tests for the Thunderbolt driver
 *
 * Copyright (C) 2020, Intel Corporation
 */

#include <kunit/test.h>
#include <linux/idr.h>

#include "tb.h"
#include "tunnel.h"
static int __ida_init(struct kunit_resource *res, void *context)
{
	struct ida *ida = context;

	ida_init(ida);
	res->data = ida;
	return 0;
}

static void __ida_destroy(struct kunit_resource *res)
{
	struct ida *ida = res->data;

	ida_destroy(ida);
}

static void kunit_ida_init(struct kunit *test, struct ida *ida)
{
	kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
}
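/*
 * alloc_switch() creates a fake struct tb_switch for the given route,
 * upstream adapter and port count. Everything is allocated with
 * kunit_kzalloc() and the per-port HopID IDAs are registered as test
 * resources, so all of it is released automatically when the test case
 * finishes.
 */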
static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
				      u8 upstream_port, u8 max_port_number)
{
	struct tb_switch *sw;
	size_t size;
	int i;

	sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;
47 sw->config.upstream_port_number = upstream_port;
48 sw->config.depth = tb_route_length(route);
49 sw->config.route_hi = upper_32_bits(route);
50 sw->config.route_lo = lower_32_bits(route);
51 sw->config.enabled = 0;
52 sw->config.max_port_number = max_port_number;
54 size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
55 sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
	for (i = 0; i <= sw->config.max_port_number; i++) {
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
		sw->ports[i].config.port_number = i;
		if (i) {
			kunit_ida_init(test, &sw->ports[i].in_hopids);
			kunit_ida_init(test, &sw->ports[i].out_hopids);
		}
	}

	return sw;
}
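/*
 * alloc_host() models the host router: ports 1-4 are lane adapters
 * (1/2 and 3/4 form dual-link pairs), 5 and 6 are DP IN adapters, 7 is
 * the NHI, 8 and 9 are PCIe downstream adapters, 12 and 13 are USB3
 * downstream adapters, and ports 10 and 11 are disabled.
 */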
static struct tb_switch *alloc_host(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_switch(test, 0, 7, 13);
	if (!sw)
		return NULL;
80 sw->config.vendor_id = 0x8086;
81 sw->config.device_id = 0x9a1b;
83 sw->ports[0].config.type = TB_TYPE_PORT;
84 sw->ports[0].config.max_in_hop_id = 7;
85 sw->ports[0].config.max_out_hop_id = 7;
87 sw->ports[1].config.type = TB_TYPE_PORT;
88 sw->ports[1].config.max_in_hop_id = 19;
89 sw->ports[1].config.max_out_hop_id = 19;
90 sw->ports[1].dual_link_port = &sw->ports[2];
92 sw->ports[2].config.type = TB_TYPE_PORT;
93 sw->ports[2].config.max_in_hop_id = 19;
94 sw->ports[2].config.max_out_hop_id = 19;
95 sw->ports[2].dual_link_port = &sw->ports[1];
96 sw->ports[2].link_nr = 1;
98 sw->ports[3].config.type = TB_TYPE_PORT;
99 sw->ports[3].config.max_in_hop_id = 19;
100 sw->ports[3].config.max_out_hop_id = 19;
101 sw->ports[3].dual_link_port = &sw->ports[4];
103 sw->ports[4].config.type = TB_TYPE_PORT;
104 sw->ports[4].config.max_in_hop_id = 19;
105 sw->ports[4].config.max_out_hop_id = 19;
106 sw->ports[4].dual_link_port = &sw->ports[3];
107 sw->ports[4].link_nr = 1;
109 sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
110 sw->ports[5].config.max_in_hop_id = 9;
111 sw->ports[5].config.max_out_hop_id = 9;
112 sw->ports[5].cap_adap = -1;
114 sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
115 sw->ports[6].config.max_in_hop_id = 9;
116 sw->ports[6].config.max_out_hop_id = 9;
117 sw->ports[6].cap_adap = -1;
119 sw->ports[7].config.type = TB_TYPE_NHI;
120 sw->ports[7].config.max_in_hop_id = 11;
121 sw->ports[7].config.max_out_hop_id = 11;
123 sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
124 sw->ports[8].config.max_in_hop_id = 8;
125 sw->ports[8].config.max_out_hop_id = 8;
127 sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
128 sw->ports[9].config.max_in_hop_id = 8;
129 sw->ports[9].config.max_out_hop_id = 8;
131 sw->ports[10].disabled = true;
132 sw->ports[11].disabled = true;
134 sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
135 sw->ports[12].config.max_in_hop_id = 8;
136 sw->ports[12].config.max_out_hop_id = 8;
138 sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
139 sw->ports[13].config.max_in_hop_id = 8;
	sw->ports[13].config.max_out_hop_id = 8;

	return sw;
}
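/*
 * alloc_dev_default() models a device router: ports 1-8 are lane
 * adapters paired as dual-link ports, 9 is the PCIe upstream adapter,
 * 10-12 are PCIe downstream adapters, 13 and 14 are DP OUT adapters,
 * 16 is the USB3 upstream adapter and 17-19 are USB3 downstream
 * adapters. When @parent is given the new router is linked to the
 * parent port selected by @route, optionally with the lanes bonded.
 */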
static struct tb_switch *alloc_dev_default(struct kunit *test,
					   struct tb_switch *parent,
					   u64 route, bool bonded)
{
	struct tb_port *port, *upstream_port;
	struct tb_switch *sw;

	sw = alloc_switch(test, route, 1, 19);
	if (!sw)
		return NULL;
156 sw->config.vendor_id = 0x8086;
157 sw->config.device_id = 0x15ef;
159 sw->ports[0].config.type = TB_TYPE_PORT;
160 sw->ports[0].config.max_in_hop_id = 8;
161 sw->ports[0].config.max_out_hop_id = 8;
163 sw->ports[1].config.type = TB_TYPE_PORT;
164 sw->ports[1].config.max_in_hop_id = 19;
165 sw->ports[1].config.max_out_hop_id = 19;
166 sw->ports[1].dual_link_port = &sw->ports[2];
168 sw->ports[2].config.type = TB_TYPE_PORT;
169 sw->ports[2].config.max_in_hop_id = 19;
170 sw->ports[2].config.max_out_hop_id = 19;
171 sw->ports[2].dual_link_port = &sw->ports[1];
172 sw->ports[2].link_nr = 1;
174 sw->ports[3].config.type = TB_TYPE_PORT;
175 sw->ports[3].config.max_in_hop_id = 19;
176 sw->ports[3].config.max_out_hop_id = 19;
177 sw->ports[3].dual_link_port = &sw->ports[4];
179 sw->ports[4].config.type = TB_TYPE_PORT;
180 sw->ports[4].config.max_in_hop_id = 19;
181 sw->ports[4].config.max_out_hop_id = 19;
182 sw->ports[4].dual_link_port = &sw->ports[3];
183 sw->ports[4].link_nr = 1;
185 sw->ports[5].config.type = TB_TYPE_PORT;
186 sw->ports[5].config.max_in_hop_id = 19;
187 sw->ports[5].config.max_out_hop_id = 19;
188 sw->ports[5].dual_link_port = &sw->ports[6];
190 sw->ports[6].config.type = TB_TYPE_PORT;
191 sw->ports[6].config.max_in_hop_id = 19;
192 sw->ports[6].config.max_out_hop_id = 19;
193 sw->ports[6].dual_link_port = &sw->ports[5];
194 sw->ports[6].link_nr = 1;
196 sw->ports[7].config.type = TB_TYPE_PORT;
197 sw->ports[7].config.max_in_hop_id = 19;
198 sw->ports[7].config.max_out_hop_id = 19;
199 sw->ports[7].dual_link_port = &sw->ports[8];
201 sw->ports[8].config.type = TB_TYPE_PORT;
202 sw->ports[8].config.max_in_hop_id = 19;
203 sw->ports[8].config.max_out_hop_id = 19;
204 sw->ports[8].dual_link_port = &sw->ports[7];
205 sw->ports[8].link_nr = 1;
207 sw->ports[9].config.type = TB_TYPE_PCIE_UP;
208 sw->ports[9].config.max_in_hop_id = 8;
209 sw->ports[9].config.max_out_hop_id = 8;
211 sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
212 sw->ports[10].config.max_in_hop_id = 8;
213 sw->ports[10].config.max_out_hop_id = 8;
215 sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
216 sw->ports[11].config.max_in_hop_id = 8;
217 sw->ports[11].config.max_out_hop_id = 8;
219 sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
220 sw->ports[12].config.max_in_hop_id = 8;
221 sw->ports[12].config.max_out_hop_id = 8;
223 sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
224 sw->ports[13].config.max_in_hop_id = 9;
225 sw->ports[13].config.max_out_hop_id = 9;
226 sw->ports[13].cap_adap = -1;
228 sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
229 sw->ports[14].config.max_in_hop_id = 9;
230 sw->ports[14].config.max_out_hop_id = 9;
231 sw->ports[14].cap_adap = -1;
233 sw->ports[15].disabled = true;
235 sw->ports[16].config.type = TB_TYPE_USB3_UP;
236 sw->ports[16].config.max_in_hop_id = 8;
237 sw->ports[16].config.max_out_hop_id = 8;
239 sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
240 sw->ports[17].config.max_in_hop_id = 8;
241 sw->ports[17].config.max_out_hop_id = 8;
243 sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
244 sw->ports[18].config.max_in_hop_id = 8;
245 sw->ports[18].config.max_out_hop_id = 8;
247 sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
248 sw->ports[19].config.max_in_hop_id = 8;
249 sw->ports[19].config.max_out_hop_id = 8;
	if (!parent)
		return sw;

	/* Link them */
	upstream_port = tb_upstream_port(sw);
	port = tb_port_at(route, parent);
	port->remote = upstream_port;
	upstream_port->remote = port;

	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	if (bonded) {
		/* Bonding is used */
		port->bonded = true;
		port->dual_link_port->bonded = true;
		upstream_port->bonded = true;
		upstream_port->dual_link_port->bonded = true;
	}

	return sw;
}
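/*
 * alloc_dev_with_dpin() is alloc_dev_default() with ports 13 and 14
 * turned into DP IN adapters so that the router can act as a DP source.
 */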
static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
					     struct tb_switch *parent,
					     u64 route, bool bonded)
{
	struct tb_switch *sw;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;
285 sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
286 sw->ports[13].config.max_in_hop_id = 9;
287 sw->ports[13].config.max_out_hop_id = 9;
289 sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
290 sw->ports[14].config.max_in_hop_id = 9;
	sw->ports[14].config.max_out_hop_id = 9;

	return sw;
}
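/*
 * The path walking tests below exercise tb_next_port_on_path() both
 * directly and through the tb_for_each_port_on_path() iterator, which
 * expands to roughly the following (simplified sketch, see tb.h for the
 * exact definition):
 *
 *	for (p = tb_next_port_on_path(src, dst, NULL); p;
 *	     p = tb_next_port_on_path(src, dst, p))
 */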
static void tb_test_path_basic(struct kunit *test)
{
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host;

	host = alloc_host(test);

	src_port = &host->ports[5];
	dst_port = src_port;
306 p = tb_next_port_on_path(src_port, dst_port, NULL);
307 KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
309 p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}
313 static void tb_test_path_not_connected_walk(struct kunit *test)
315 struct tb_port *src_port, *dst_port, *p;
316 struct tb_switch *host, *dev;
318 host = alloc_host(test);
319 /* No connection between host and dev */
320 dev = alloc_dev_default(test, NULL, 3, true);
322 src_port = &host->ports[12];
323 dst_port = &dev->ports[16];
325 p = tb_next_port_on_path(src_port, dst_port, NULL);
326 KUNIT_EXPECT_PTR_EQ(test, p, src_port);
328 p = tb_next_port_on_path(src_port, dst_port, p);
329 KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);
331 p = tb_next_port_on_path(src_port, dst_port, p);
332 KUNIT_EXPECT_TRUE(test, !p);
334 /* Other direction */
336 p = tb_next_port_on_path(dst_port, src_port, NULL);
337 KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
339 p = tb_next_port_on_path(dst_port, src_port, p);
340 KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);
342 p = tb_next_port_on_path(dst_port, src_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}
struct port_expectation {
	u64 route;
	u8 port;
	enum tb_port_type type;
};
static void tb_test_path_single_hop_walk(struct kunit *test)
{
	/*
	 * Walks from Host PCIe downstream port to Device #1 PCIe
	 * upstream port.
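	 *
	 * The walk is expected to visit the four ports listed in
	 * test_data below: two in the host router and two in the device.
	 */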
363 static const struct port_expectation test_data[] = {
364 { .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
365 { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
366 { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
367 { .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
369 struct tb_port *src_port, *dst_port, *p;
370 struct tb_switch *host, *dev;
373 host = alloc_host(test);
374 dev = alloc_dev_default(test, host, 1, true);
376 src_port = &host->ports[8];
377 dst_port = &dev->ports[9];
379 /* Walk both directions */
382 tb_for_each_port_on_path(src_port, dst_port, p) {
383 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
384 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
385 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
386 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
391 KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
393 i = ARRAY_SIZE(test_data) - 1;
394 tb_for_each_port_on_path(dst_port, src_port, p) {
395 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
396 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
397 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
398 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
403 KUNIT_EXPECT_EQ(test, i, -1);
406 static void tb_test_path_daisy_chain_walk(struct kunit *test)
409 * Walks from Host DP IN to Device #2 DP OUT.
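	 *
	 * The walk goes through Device #1 and is expected to visit the
	 * six ports listed in test_data below.
	 */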
419 static const struct port_expectation test_data[] = {
420 { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
421 { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
422 { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
423 { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
424 { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
425 { .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
427 struct tb_port *src_port, *dst_port, *p;
428 struct tb_switch *host, *dev1, *dev2;
431 host = alloc_host(test);
432 dev1 = alloc_dev_default(test, host, 0x1, true);
433 dev2 = alloc_dev_default(test, dev1, 0x301, true);
435 src_port = &host->ports[5];
436 dst_port = &dev2->ports[13];
438 /* Walk both directions */
441 tb_for_each_port_on_path(src_port, dst_port, p) {
442 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
443 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
444 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
445 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
450 KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
452 i = ARRAY_SIZE(test_data) - 1;
453 tb_for_each_port_on_path(dst_port, src_port, p) {
454 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
455 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
456 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
457 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
462 KUNIT_EXPECT_EQ(test, i, -1);
465 static void tb_test_path_simple_tree_walk(struct kunit *test)
468 * Walks from Host DP IN to Device #3 DP OUT.
476 * [Device #2] | [Device #4]
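	 *
	 * Devices #2 and #4 hang off the same parent as Device #3 but are
	 * not part of the path, so the walk is expected to visit only the
	 * six ports listed in test_data below.
	 */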
480 static const struct port_expectation test_data[] = {
481 { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
482 { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
483 { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
484 { .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
485 { .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
486 { .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
488 struct tb_port *src_port, *dst_port, *p;
489 struct tb_switch *host, *dev1, *dev3;
492 host = alloc_host(test);
493 dev1 = alloc_dev_default(test, host, 0x1, true);
494 alloc_dev_default(test, dev1, 0x301, true);
495 dev3 = alloc_dev_default(test, dev1, 0x501, true);
496 alloc_dev_default(test, dev1, 0x701, true);
498 src_port = &host->ports[5];
499 dst_port = &dev3->ports[13];
501 /* Walk both directions */
504 tb_for_each_port_on_path(src_port, dst_port, p) {
505 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
506 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
507 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
508 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
513 KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
515 i = ARRAY_SIZE(test_data) - 1;
516 tb_for_each_port_on_path(dst_port, src_port, p) {
517 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
518 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
519 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
520 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
525 KUNIT_EXPECT_EQ(test, i, -1);
528 static void tb_test_path_complex_tree_walk(struct kunit *test)
531 * Walks from Device #3 DP IN to Device #9 DP OUT.
539 * [Device #2] | [Device #5]
541 * 1 | [Device #4] \ 1
542 * [Device #3] [Device #6]
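	 *
	 * The walk goes from Device #3 up through Devices #2 and #1 and
	 * back down through Devices #5, #6 and #7 to Device #9, visiting
	 * the 14 ports listed in test_data below.
	 */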
551 static const struct port_expectation test_data[] = {
552 { .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
553 { .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
554 { .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
555 { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
556 { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
557 { .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
558 { .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
559 { .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
560 { .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
561 { .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
562 { .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
563 { .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
564 { .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
565 { .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
567 struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
568 struct tb_port *src_port, *dst_port, *p;
571 host = alloc_host(test);
572 dev1 = alloc_dev_default(test, host, 0x1, true);
573 dev2 = alloc_dev_default(test, dev1, 0x301, true);
574 dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
575 alloc_dev_default(test, dev1, 0x501, true);
576 dev5 = alloc_dev_default(test, dev1, 0x701, true);
577 dev6 = alloc_dev_default(test, dev5, 0x70701, true);
578 dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
579 alloc_dev_default(test, dev7, 0x303070701, true);
580 dev9 = alloc_dev_default(test, dev7, 0x503070701, true);
582 src_port = &dev3->ports[13];
583 dst_port = &dev9->ports[14];
585 /* Walk both directions */
588 tb_for_each_port_on_path(src_port, dst_port, p) {
589 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
590 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
591 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
592 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
597 KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
599 i = ARRAY_SIZE(test_data) - 1;
600 tb_for_each_port_on_path(dst_port, src_port, p) {
601 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
602 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
603 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
604 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
609 KUNIT_EXPECT_EQ(test, i, -1);
612 static void tb_test_path_max_length_walk(struct kunit *test)
614 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
615 struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
616 struct tb_port *src_port, *dst_port, *p;
620 * Walks from Device #6 DP IN to Device #12 DP OUT.
625 * [Device #1] [Device #7]
628 * [Device #2] [Device #8]
631 * [Device #3] [Device #9]
634 * [Device #4] [Device #10]
637 * [Device #5] [Device #11]
640 * [Device #6] [Device #12]
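	 *
	 * The walk crosses from one branch to the other through the host
	 * router and is expected to visit the 26 ports listed in
	 * test_data below.
	 */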
642 static const struct port_expectation test_data[] = {
643 { .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
644 { .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
645 { .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
646 { .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
647 { .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
648 { .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
649 { .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
650 { .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
651 { .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
652 { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
653 { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
654 { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
655 { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
656 { .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
657 { .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
658 { .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
659 { .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
660 { .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
661 { .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
662 { .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
663 { .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
664 { .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
665 { .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
666 { .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
667 { .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
668 { .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
671 host = alloc_host(test);
672 dev1 = alloc_dev_default(test, host, 0x1, true);
673 dev2 = alloc_dev_default(test, dev1, 0x301, true);
674 dev3 = alloc_dev_default(test, dev2, 0x30301, true);
675 dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
676 dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
677 dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
678 dev7 = alloc_dev_default(test, host, 0x3, true);
679 dev8 = alloc_dev_default(test, dev7, 0x303, true);
680 dev9 = alloc_dev_default(test, dev8, 0x30303, true);
681 dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
682 dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
683 dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
685 src_port = &dev6->ports[13];
686 dst_port = &dev12->ports[13];
688 /* Walk both directions */
691 tb_for_each_port_on_path(src_port, dst_port, p) {
692 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
693 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
694 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
695 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
700 KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
702 i = ARRAY_SIZE(test_data) - 1;
703 tb_for_each_port_on_path(dst_port, src_port, p) {
704 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
705 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
706 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
707 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
712 KUNIT_EXPECT_EQ(test, i, -1);
715 static void tb_test_path_not_connected(struct kunit *test)
717 struct tb_switch *host, *dev1, *dev2;
718 struct tb_port *down, *up;
719 struct tb_path *path;
721 host = alloc_host(test);
722 dev1 = alloc_dev_default(test, host, 0x3, false);
723 /* Not connected to anything */
724 dev2 = alloc_dev_default(test, NULL, 0x303, false);
726 down = &dev1->ports[10];
727 up = &dev2->ports[9];
729 path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
730 KUNIT_ASSERT_TRUE(test, path == NULL);
731 path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
732 KUNIT_ASSERT_TRUE(test, path == NULL);
struct hop_expectation {
	u64 route;
	u8 in_port;
	enum tb_port_type in_type;
	u8 out_port;
	enum tb_port_type out_type;
};
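/*
 * The tb_path_alloc() tests below verify hop by hop that an allocated
 * path uses the expected adapters, and in particular that the right
 * lane (0 or 1) is picked on links that are not bonded.
 */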
743 static void tb_test_path_not_bonded_lane0(struct kunit *test)
746 * PCIe path from host to device using lane 0.
753 static const struct hop_expectation test_data[] = {
757 .in_type = TB_TYPE_PCIE_DOWN,
759 .out_type = TB_TYPE_PORT,
764 .in_type = TB_TYPE_PORT,
766 .out_type = TB_TYPE_PCIE_UP,
769 struct tb_switch *host, *dev;
770 struct tb_port *down, *up;
771 struct tb_path *path;
774 host = alloc_host(test);
775 dev = alloc_dev_default(test, host, 0x3, false);
	down = &host->ports[9];
	up = &dev->ports[9];
780 path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
781 KUNIT_ASSERT_TRUE(test, path != NULL);
782 KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
783 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
784 const struct tb_port *in_port, *out_port;
786 in_port = path->hops[i].in_port;
787 out_port = path->hops[i].out_port;
789 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
790 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
791 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
792 test_data[i].in_type);
793 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
794 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
795 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
796 test_data[i].out_type);
801 static void tb_test_path_not_bonded_lane1(struct kunit *test)
804 * DP Video path from host to device using lane 1. Paths like
805 * these are only used with Thunderbolt 1 devices where lane
806 * bonding is not possible. USB4 specifically does not allow
807 * paths like this (you either use lane 0 where lane 1 is
808 * disabled or both lanes are bonded).
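	 *
	 * In the fake topology lane 1 is the secondary adapter of each
	 * dual-link pair (the one with link_nr == 1), so the path below
	 * is expected to use the secondary adapters, e.g. host port 2
	 * instead of port 1.
	 */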
815 static const struct hop_expectation test_data[] = {
819 .in_type = TB_TYPE_DP_HDMI_IN,
821 .out_type = TB_TYPE_PORT,
826 .in_type = TB_TYPE_PORT,
828 .out_type = TB_TYPE_DP_HDMI_OUT,
831 struct tb_switch *host, *dev;
832 struct tb_port *in, *out;
833 struct tb_path *path;
836 host = alloc_host(test);
837 dev = alloc_dev_default(test, host, 0x1, false);
839 in = &host->ports[5];
840 out = &dev->ports[13];
842 path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
843 KUNIT_ASSERT_TRUE(test, path != NULL);
844 KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
845 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
846 const struct tb_port *in_port, *out_port;
848 in_port = path->hops[i].in_port;
849 out_port = path->hops[i].out_port;
851 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
852 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
853 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
854 test_data[i].in_type);
855 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
856 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
857 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
858 test_data[i].out_type);
863 static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
866 * DP Video path from host to device 3 using lane 1.
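	 *
	 * None of the links are bonded, so every hop is expected to use
	 * the lane 1 (secondary) adapters on both ends of each link.
	 */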
879 static const struct hop_expectation test_data[] = {
883 .in_type = TB_TYPE_DP_HDMI_IN,
885 .out_type = TB_TYPE_PORT,
890 .in_type = TB_TYPE_PORT,
892 .out_type = TB_TYPE_PORT,
897 .in_type = TB_TYPE_PORT,
899 .out_type = TB_TYPE_PORT,
904 .in_type = TB_TYPE_PORT,
906 .out_type = TB_TYPE_DP_HDMI_OUT,
909 struct tb_switch *host, *dev1, *dev2, *dev3;
910 struct tb_port *in, *out;
911 struct tb_path *path;
914 host = alloc_host(test);
915 dev1 = alloc_dev_default(test, host, 0x1, false);
916 dev2 = alloc_dev_default(test, dev1, 0x701, false);
917 dev3 = alloc_dev_default(test, dev2, 0x50701, false);
919 in = &host->ports[5];
920 out = &dev3->ports[13];
922 path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
923 KUNIT_ASSERT_TRUE(test, path != NULL);
924 KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
925 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
926 const struct tb_port *in_port, *out_port;
928 in_port = path->hops[i].in_port;
929 out_port = path->hops[i].out_port;
931 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
932 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
933 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
934 test_data[i].in_type);
935 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
936 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
937 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
938 test_data[i].out_type);
943 static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
946 * DP Video path from device 3 to host using lane 1.
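	 *
	 * Same topology as above, but the path is allocated in the
	 * opposite direction, starting from the DP IN adapter of
	 * Device #3.
	 */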
959 static const struct hop_expectation test_data[] = {
963 .in_type = TB_TYPE_DP_HDMI_IN,
965 .out_type = TB_TYPE_PORT,
970 .in_type = TB_TYPE_PORT,
972 .out_type = TB_TYPE_PORT,
977 .in_type = TB_TYPE_PORT,
979 .out_type = TB_TYPE_PORT,
984 .in_type = TB_TYPE_PORT,
986 .out_type = TB_TYPE_DP_HDMI_IN,
989 struct tb_switch *host, *dev1, *dev2, *dev3;
990 struct tb_port *in, *out;
991 struct tb_path *path;
994 host = alloc_host(test);
995 dev1 = alloc_dev_default(test, host, 0x1, false);
996 dev2 = alloc_dev_default(test, dev1, 0x701, false);
997 dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);
999 in = &dev3->ports[13];
1000 out = &host->ports[5];
1002 path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1003 KUNIT_ASSERT_TRUE(test, path != NULL);
1004 KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
1005 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1006 const struct tb_port *in_port, *out_port;
1008 in_port = path->hops[i].in_port;
1009 out_port = path->hops[i].out_port;
1011 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1012 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1013 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1014 test_data[i].in_type);
1015 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1016 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1017 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1018 test_data[i].out_type);
1023 static void tb_test_path_mixed_chain(struct kunit *test)
	/*
	 * DP Video path from host to device 4 where first and last link
	 * are bonded.
	 */
1043 static const struct hop_expectation test_data[] = {
1047 .in_type = TB_TYPE_DP_HDMI_IN,
1049 .out_type = TB_TYPE_PORT,
1054 .in_type = TB_TYPE_PORT,
1056 .out_type = TB_TYPE_PORT,
1061 .in_type = TB_TYPE_PORT,
1063 .out_type = TB_TYPE_PORT,
1068 .in_type = TB_TYPE_PORT,
1070 .out_type = TB_TYPE_PORT,
1075 .in_type = TB_TYPE_PORT,
1077 .out_type = TB_TYPE_DP_HDMI_OUT,
1080 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
1081 struct tb_port *in, *out;
1082 struct tb_path *path;
1085 host = alloc_host(test);
1086 dev1 = alloc_dev_default(test, host, 0x1, true);
1087 dev2 = alloc_dev_default(test, dev1, 0x701, false);
1088 dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1089 dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
1091 in = &host->ports[5];
1092 out = &dev4->ports[13];
1094 path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1095 KUNIT_ASSERT_TRUE(test, path != NULL);
1096 KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
1097 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1098 const struct tb_port *in_port, *out_port;
1100 in_port = path->hops[i].in_port;
1101 out_port = path->hops[i].out_port;
1103 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1104 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1105 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1106 test_data[i].in_type);
1107 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1108 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1109 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1110 test_data[i].out_type);
1115 static void tb_test_path_mixed_chain_reverse(struct kunit *test)
	/*
	 * DP Video path from device 4 to host where first and last link
	 * are bonded.
	 */
1135 static const struct hop_expectation test_data[] = {
1139 .in_type = TB_TYPE_DP_HDMI_OUT,
1141 .out_type = TB_TYPE_PORT,
1146 .in_type = TB_TYPE_PORT,
1148 .out_type = TB_TYPE_PORT,
1153 .in_type = TB_TYPE_PORT,
1155 .out_type = TB_TYPE_PORT,
1160 .in_type = TB_TYPE_PORT,
1162 .out_type = TB_TYPE_PORT,
1167 .in_type = TB_TYPE_PORT,
1169 .out_type = TB_TYPE_DP_HDMI_IN,
1172 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
1173 struct tb_port *in, *out;
1174 struct tb_path *path;
1177 host = alloc_host(test);
1178 dev1 = alloc_dev_default(test, host, 0x1, true);
1179 dev2 = alloc_dev_default(test, dev1, 0x701, false);
1180 dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1181 dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
1183 in = &dev4->ports[13];
1184 out = &host->ports[5];
1186 path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1187 KUNIT_ASSERT_TRUE(test, path != NULL);
1188 KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
1189 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1190 const struct tb_port *in_port, *out_port;
1192 in_port = path->hops[i].in_port;
1193 out_port = path->hops[i].out_port;
1195 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1196 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1197 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1198 test_data[i].in_type);
1199 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1200 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1201 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1202 test_data[i].out_type);
1207 static void tb_test_tunnel_pcie(struct kunit *test)
1209 struct tb_switch *host, *dev1, *dev2;
1210 struct tb_tunnel *tunnel1, *tunnel2;
1211 struct tb_port *down, *up;
1214 * Create PCIe tunnel between host and two devices.
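	 *
	 * Two tunnels are allocated: host <-> Device #1 and
	 * Device #1 <-> Device #2. Each is expected to contain two paths
	 * (one per direction) of two hops each.
	 */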
1224 host = alloc_host(test);
1225 dev1 = alloc_dev_default(test, host, 0x1, true);
1226 dev2 = alloc_dev_default(test, dev1, 0x501, true);
1228 down = &host->ports[8];
1229 up = &dev1->ports[9];
1230 tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
1231 KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
1232 KUNIT_EXPECT_EQ(test, tunnel1->type, (enum tb_tunnel_type)TB_TUNNEL_PCI);
1233 KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
1234 KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
1235 KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
1236 KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
1237 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
1238 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
1239 KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
1240 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
1241 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
1243 down = &dev1->ports[10];
1244 up = &dev2->ports[9];
1245 tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
1246 KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
1247 KUNIT_EXPECT_EQ(test, tunnel2->type, (enum tb_tunnel_type)TB_TUNNEL_PCI);
1248 KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
1249 KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
1250 KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
1251 KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
1252 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
1253 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
1254 KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
1255 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
1256 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
1258 tb_tunnel_free(tunnel2);
1259 tb_tunnel_free(tunnel1);
1262 static void tb_test_tunnel_dp(struct kunit *test)
1264 struct tb_switch *host, *dev;
1265 struct tb_port *in, *out;
1266 struct tb_tunnel *tunnel;
1269 * Create DP tunnel between Host and Device
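	 *
	 * The tunnel is expected to contain three paths (video and AUX TX
	 * from IN to OUT plus AUX RX back from OUT to IN), each two hops
	 * long.
	 */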
1276 host = alloc_host(test);
1277 dev = alloc_dev_default(test, host, 0x3, true);
1279 in = &host->ports[5];
1280 out = &dev->ports[13];
1282 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1283 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1284 KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
1285 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1286 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1287 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
1288 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
1289 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1290 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
1291 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
1292 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1293 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
1294 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
1295 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1296 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
1297 tb_tunnel_free(tunnel);
1300 static void tb_test_tunnel_dp_chain(struct kunit *test)
1302 struct tb_switch *host, *dev1, *dev4;
1303 struct tb_port *in, *out;
1304 struct tb_tunnel *tunnel;
1307 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
1315 * [Device #2] | [Device #4]
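	 *
	 * The tunnel goes through Device #1, so each of the three paths
	 * is expected to be three hops long.
	 */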
1319 host = alloc_host(test);
1320 dev1 = alloc_dev_default(test, host, 0x1, true);
1321 alloc_dev_default(test, dev1, 0x301, true);
1322 alloc_dev_default(test, dev1, 0x501, true);
1323 dev4 = alloc_dev_default(test, dev1, 0x701, true);
1325 in = &host->ports[5];
1326 out = &dev4->ports[14];
1328 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1329 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1330 KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
1331 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1332 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1333 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
1334 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
1335 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1336 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
1337 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
1338 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1339 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
1340 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
1341 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1342 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
1343 tb_tunnel_free(tunnel);
1346 static void tb_test_tunnel_dp_tree(struct kunit *test)
1348 struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
1349 struct tb_port *in, *out;
1350 struct tb_tunnel *tunnel;
1353 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
1361 * [Device #2] | [Device #4]
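	 *
	 * The tunnel goes up from Device #2 to Device #1 and back down
	 * through Device #3 to Device #5, so each of the three paths is
	 * expected to be four hops long.
	 */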
1368 host = alloc_host(test);
1369 dev1 = alloc_dev_default(test, host, 0x3, true);
1370 dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
1371 dev3 = alloc_dev_default(test, dev1, 0x503, true);
1372 alloc_dev_default(test, dev1, 0x703, true);
1373 dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1375 in = &dev2->ports[13];
1376 out = &dev5->ports[13];
1378 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1379 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1380 KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
1381 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1382 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1383 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
1384 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
1385 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1386 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
1387 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
1388 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1389 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
1390 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
1391 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1392 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
1393 tb_tunnel_free(tunnel);
1396 static void tb_test_tunnel_dp_max_length(struct kunit *test)
1398 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
1399 struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
1400 struct tb_port *in, *out;
1401 struct tb_tunnel *tunnel;
1404 * Creates DP tunnel from Device #6 to Device #12.
1409 * [Device #1] [Device #7]
1412 * [Device #2] [Device #8]
1415 * [Device #3] [Device #9]
1418 * [Device #4] [Device #10]
1421 * [Device #5] [Device #11]
1424 * [Device #6] [Device #12]
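	 *
	 * Each of the three paths is expected to be 13 hops long, with
	 * the host router in the middle of the path (hop 6).
	 */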
1426 host = alloc_host(test);
1427 dev1 = alloc_dev_default(test, host, 0x1, true);
1428 dev2 = alloc_dev_default(test, dev1, 0x301, true);
1429 dev3 = alloc_dev_default(test, dev2, 0x30301, true);
1430 dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
1431 dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
1432 dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
1433 dev7 = alloc_dev_default(test, host, 0x3, true);
1434 dev8 = alloc_dev_default(test, dev7, 0x303, true);
1435 dev9 = alloc_dev_default(test, dev8, 0x30303, true);
1436 dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
1437 dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
1438 dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
1440 in = &dev6->ports[13];
1441 out = &dev12->ports[13];
1443 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1444 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1445 KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
1446 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1447 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1448 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
1449 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
1451 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1453 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
1455 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
1458 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
1459 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
1460 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1461 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
1463 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
1465 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
1466 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
1467 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1468 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
1470 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
1472 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
1473 tb_tunnel_free(tunnel);
1476 static void tb_test_tunnel_usb3(struct kunit *test)
1478 struct tb_switch *host, *dev1, *dev2;
1479 struct tb_tunnel *tunnel1, *tunnel2;
1480 struct tb_port *down, *up;
1483 * Create USB3 tunnel between host and two devices.
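	 *
	 * Two tunnels are allocated: host <-> Device #1 and
	 * Device #1 <-> Device #2. Each is expected to contain two paths
	 * (one per direction) of two hops each.
	 */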
1493 host = alloc_host(test);
1494 dev1 = alloc_dev_default(test, host, 0x1, true);
1495 dev2 = alloc_dev_default(test, dev1, 0x701, true);
1497 down = &host->ports[12];
1498 up = &dev1->ports[16];
1499 tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1500 KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
1501 KUNIT_EXPECT_EQ(test, tunnel1->type, (enum tb_tunnel_type)TB_TUNNEL_USB3);
1502 KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
1503 KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
1504 KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
1505 KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
1506 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
1507 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
1508 KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
1509 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
1510 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
1512 down = &dev1->ports[17];
1513 up = &dev2->ports[16];
1514 tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1515 KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
1516 KUNIT_EXPECT_EQ(test, tunnel2->type, (enum tb_tunnel_type)TB_TUNNEL_USB3);
1517 KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
1518 KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
1519 KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
1520 KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
1521 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
1522 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
1523 KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
1524 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
1525 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
1527 tb_tunnel_free(tunnel2);
1528 tb_tunnel_free(tunnel1);
1531 static void tb_test_tunnel_port_on_path(struct kunit *test)
1533 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
1534 struct tb_port *in, *out, *port;
1535 struct tb_tunnel *dp_tunnel;
1544 * [Device #2] | [Device #4]
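	 *
	 * Checks that tb_tunnel_port_on_path() reports true only for the
	 * ports that take part in the DP tunnel from Device #2 DP IN to
	 * Device #5 DP OUT.
	 */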
1551 host = alloc_host(test);
1552 dev1 = alloc_dev_default(test, host, 0x3, true);
1553 dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
1554 dev3 = alloc_dev_default(test, dev1, 0x503, true);
1555 dev4 = alloc_dev_default(test, dev1, 0x703, true);
1556 dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1558 in = &dev2->ports[13];
1559 out = &dev5->ports[13];
1561 dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1562 KUNIT_ASSERT_TRUE(test, dp_tunnel != NULL);
1564 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
1565 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));
1567 port = &host->ports[8];
1568 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1570 port = &host->ports[3];
1571 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1573 port = &dev1->ports[1];
1574 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1576 port = &dev1->ports[3];
1577 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1579 port = &dev1->ports[5];
1580 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1582 port = &dev1->ports[7];
1583 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1585 port = &dev3->ports[1];
1586 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1588 port = &dev5->ports[1];
1589 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1591 port = &dev4->ports[1];
1592 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1594 tb_tunnel_free(dp_tunnel);
1597 static struct kunit_case tb_test_cases[] = {
1598 KUNIT_CASE(tb_test_path_basic),
1599 KUNIT_CASE(tb_test_path_not_connected_walk),
1600 KUNIT_CASE(tb_test_path_single_hop_walk),
1601 KUNIT_CASE(tb_test_path_daisy_chain_walk),
1602 KUNIT_CASE(tb_test_path_simple_tree_walk),
1603 KUNIT_CASE(tb_test_path_complex_tree_walk),
1604 KUNIT_CASE(tb_test_path_max_length_walk),
1605 KUNIT_CASE(tb_test_path_not_connected),
1606 KUNIT_CASE(tb_test_path_not_bonded_lane0),
1607 KUNIT_CASE(tb_test_path_not_bonded_lane1),
1608 KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
1609 KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
1610 KUNIT_CASE(tb_test_path_mixed_chain),
1611 KUNIT_CASE(tb_test_path_mixed_chain_reverse),
1612 KUNIT_CASE(tb_test_tunnel_pcie),
1613 KUNIT_CASE(tb_test_tunnel_dp),
1614 KUNIT_CASE(tb_test_tunnel_dp_chain),
1615 KUNIT_CASE(tb_test_tunnel_dp_tree),
1616 KUNIT_CASE(tb_test_tunnel_dp_max_length),
1617 KUNIT_CASE(tb_test_tunnel_port_on_path),
	KUNIT_CASE(tb_test_tunnel_usb3),
	{ }
};
1622 static struct kunit_suite tb_test_suite = {
1623 .name = "thunderbolt",
	.test_cases = tb_test_cases,
};
1627 static struct kunit_suite *tb_test_suites[] = { &tb_test_suite, NULL };
int tb_test_init(void)
{
	return __kunit_test_suites_init(tb_test_suites);
}

void tb_test_exit(void)
{
	return __kunit_test_suites_exit(tb_test_suites);
}