1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2020, Intel Corporation
9 #include <kunit/test.h>
10 #include <linux/idr.h>
/*
 * KUnit resource init callback: the caller-supplied struct ida arrives via
 * @context and is stashed for the resource.  NOTE(review): this listing is
 * truncated — the rest of the body (presumably storing @ida in res->data,
 * calling ida_init() and returning 0) is not visible here; confirm against
 * the full file.
 */
15 static int __ida_init(struct kunit_resource *res, void *context)
17 struct ida *ida = context;
/*
 * KUnit resource teardown callback: recovers the ida stored in res->data.
 * NOTE(review): truncated listing — presumably ida_destroy() is called on
 * @ida in the lines not shown; confirm against the full file.
 */
24 static void __ida_destroy(struct kunit_resource *res)
26 struct ida *ida = res->data;
/*
 * Register @ida as a test-managed resource: __ida_init() runs now and
 * __ida_destroy() runs automatically when the test case finishes, so the
 * mock switches below never leak IDA state between tests.
 */
31 static void kunit_ida_init(struct kunit *test, struct ida *ida)
33 kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
/*
 * Allocate a bare mock switch with test-managed (auto-freed) memory.
 * @route: Thunderbolt route string of the switch
 * @upstream_port: adapter number that faces the parent switch
 * @max_port_number: highest valid port index; ports 0..max are allocated
 *
 * Every port gets its number recorded in both the port and its config,
 * and its in/out HopID allocators registered for automatic cleanup.
 * NOTE(review): truncated listing — the allocation-failure assertions and
 * the final return statement are not visible here.
 */
36 static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
37 u8 upstream_port, u8 max_port_number)
43 sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
47 sw->config.upstream_port_number = upstream_port;
48 sw->config.depth = tb_route_length(route);
49 sw->config.route_hi = upper_32_bits(route);
50 sw->config.route_lo = lower_32_bits(route);
51 sw->config.enabled = 0;
52 sw->config.max_port_number = max_port_number;
/* Ports array is sized max_port_number + 1 so indices 0..max are valid. */
54 size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
55 sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
59 for (i = 0; i <= sw->config.max_port_number; i++) {
61 sw->ports[i].port = i;
62 sw->ports[i].config.port_number = i;
/* HopID IDAs are torn down by __ida_destroy() at end of test. */
64 kunit_ida_init(test, &sw->ports[i].in_hopids);
65 kunit_ida_init(test, &sw->ports[i].out_hopids);
/*
 * Build the mock host router (route 0, upstream adapter 7, 14 ports):
 *   port 0      — control adapter
 *   ports 1-4   — lane adapters, two dual-link pairs (1+2 and 3+4)
 *   ports 5-6   — DP IN adapters
 *   port 7      — NHI
 *   ports 8-9   — PCIe downstream adapters
 *   ports 10-11 — disabled
 *   ports 12-13 — USB3 downstream adapters
 * NOTE(review): truncated listing — the final return of @sw is not visible.
 */
72 static struct tb_switch *alloc_host(struct kunit *test)
76 sw = alloc_switch(test, 0, 7, 13);
80 sw->config.vendor_id = 0x8086;
81 sw->config.device_id = 0x9a1b;
83 sw->ports[0].config.type = TB_TYPE_PORT;
84 sw->ports[0].config.max_in_hop_id = 7;
85 sw->ports[0].config.max_out_hop_id = 7;
/* Lanes 1 and 2 form a dual-link pair; lane 2 is secondary (link_nr 1). */
87 sw->ports[1].config.type = TB_TYPE_PORT;
88 sw->ports[1].config.max_in_hop_id = 19;
89 sw->ports[1].config.max_out_hop_id = 19;
90 sw->ports[1].total_credits = 60;
91 sw->ports[1].ctl_credits = 2;
92 sw->ports[1].dual_link_port = &sw->ports[2];
94 sw->ports[2].config.type = TB_TYPE_PORT;
95 sw->ports[2].config.max_in_hop_id = 19;
96 sw->ports[2].config.max_out_hop_id = 19;
97 sw->ports[2].total_credits = 60;
98 sw->ports[2].ctl_credits = 2;
99 sw->ports[2].dual_link_port = &sw->ports[1];
100 sw->ports[2].link_nr = 1;
/* Lanes 3 and 4 are the second dual-link pair. */
102 sw->ports[3].config.type = TB_TYPE_PORT;
103 sw->ports[3].config.max_in_hop_id = 19;
104 sw->ports[3].config.max_out_hop_id = 19;
105 sw->ports[3].total_credits = 60;
106 sw->ports[3].ctl_credits = 2;
107 sw->ports[3].dual_link_port = &sw->ports[4];
109 sw->ports[4].config.type = TB_TYPE_PORT;
110 sw->ports[4].config.max_in_hop_id = 19;
111 sw->ports[4].config.max_out_hop_id = 19;
112 sw->ports[4].total_credits = 60;
113 sw->ports[4].ctl_credits = 2;
114 sw->ports[4].dual_link_port = &sw->ports[3];
115 sw->ports[4].link_nr = 1;
/* cap_adap = -1: DP adapters have no adapter capability in the mock. */
117 sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
118 sw->ports[5].config.max_in_hop_id = 9;
119 sw->ports[5].config.max_out_hop_id = 9;
120 sw->ports[5].cap_adap = -1;
122 sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
123 sw->ports[6].config.max_in_hop_id = 9;
124 sw->ports[6].config.max_out_hop_id = 9;
125 sw->ports[6].cap_adap = -1;
127 sw->ports[7].config.type = TB_TYPE_NHI;
128 sw->ports[7].config.max_in_hop_id = 11;
129 sw->ports[7].config.max_out_hop_id = 11;
130 sw->ports[7].config.nfc_credits = 0x41800000;
132 sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
133 sw->ports[8].config.max_in_hop_id = 8;
134 sw->ports[8].config.max_out_hop_id = 8;
136 sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
137 sw->ports[9].config.max_in_hop_id = 8;
138 sw->ports[9].config.max_out_hop_id = 8;
140 sw->ports[10].disabled = true;
141 sw->ports[11].disabled = true;
143 sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
144 sw->ports[12].config.max_in_hop_id = 8;
145 sw->ports[12].config.max_out_hop_id = 8;
147 sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
148 sw->ports[13].config.max_in_hop_id = 8;
149 sw->ports[13].config.max_out_hop_id = 8;
/*
 * Same as alloc_host() but the returned host additionally advertises
 * USB4-style per-protocol credit allocation parameters, used by the
 * credit-allocation test cases.  NOTE(review): truncated listing — the
 * NULL check after alloc_host() and the return are not visible.
 */
154 static struct tb_switch *alloc_host_usb4(struct kunit *test)
156 struct tb_switch *sw;
158 sw = alloc_host(test);
163 sw->credit_allocation = true;
164 sw->max_usb3_credits = 32;
165 sw->min_dp_aux_credits = 1;
166 sw->min_dp_main_credits = 0;
167 sw->max_pcie_credits = 64;
168 sw->max_dma_credits = 14;
/*
 * Build a mock device router at @route with upstream lane adapter 1:
 *   port 0      — control adapter
 *   ports 1-8   — lane adapters, four dual-link pairs (1+2, 3+4, 5+6, 7+8)
 *   port 9      — PCIe upstream, ports 10-12 — PCIe downstream
 *   ports 13-14 — DP OUT adapters
 *   port 15     — disabled
 *   port 16     — USB3 upstream, ports 17-19 — USB3 downstream
 *
 * If @parent is given, the device's upstream lanes are wired to the
 * parent's port at @route; @bonded additionally marks both lane pairs
 * bonded (credits doubled on the primary lane, zeroed on the secondary).
 * NOTE(review): truncated listing — the early return when @parent is NULL
 * and the final return of @sw are not visible here.
 */
173 static struct tb_switch *alloc_dev_default(struct kunit *test,
174 struct tb_switch *parent,
175 u64 route, bool bonded)
177 struct tb_port *port, *upstream_port;
178 struct tb_switch *sw;
180 sw = alloc_switch(test, route, 1, 19);
184 sw->config.vendor_id = 0x8086;
185 sw->config.device_id = 0x15ef;
187 sw->ports[0].config.type = TB_TYPE_PORT;
188 sw->ports[0].config.max_in_hop_id = 8;
189 sw->ports[0].config.max_out_hop_id = 8;
/* Dual-link pair 1+2 (upstream facing); lane 2 is secondary. */
191 sw->ports[1].config.type = TB_TYPE_PORT;
192 sw->ports[1].config.max_in_hop_id = 19;
193 sw->ports[1].config.max_out_hop_id = 19;
194 sw->ports[1].total_credits = 60;
195 sw->ports[1].ctl_credits = 2;
196 sw->ports[1].dual_link_port = &sw->ports[2];
198 sw->ports[2].config.type = TB_TYPE_PORT;
199 sw->ports[2].config.max_in_hop_id = 19;
200 sw->ports[2].config.max_out_hop_id = 19;
201 sw->ports[2].total_credits = 60;
202 sw->ports[2].ctl_credits = 2;
203 sw->ports[2].dual_link_port = &sw->ports[1];
204 sw->ports[2].link_nr = 1;
/* Dual-link pair 3+4. */
206 sw->ports[3].config.type = TB_TYPE_PORT;
207 sw->ports[3].config.max_in_hop_id = 19;
208 sw->ports[3].config.max_out_hop_id = 19;
209 sw->ports[3].total_credits = 60;
210 sw->ports[3].ctl_credits = 2;
211 sw->ports[3].dual_link_port = &sw->ports[4];
213 sw->ports[4].config.type = TB_TYPE_PORT;
214 sw->ports[4].config.max_in_hop_id = 19;
215 sw->ports[4].config.max_out_hop_id = 19;
216 sw->ports[4].total_credits = 60;
217 sw->ports[4].ctl_credits = 2;
218 sw->ports[4].dual_link_port = &sw->ports[3];
219 sw->ports[4].link_nr = 1;
/* Dual-link pair 5+6. */
221 sw->ports[5].config.type = TB_TYPE_PORT;
222 sw->ports[5].config.max_in_hop_id = 19;
223 sw->ports[5].config.max_out_hop_id = 19;
224 sw->ports[5].total_credits = 60;
225 sw->ports[5].ctl_credits = 2;
226 sw->ports[5].dual_link_port = &sw->ports[6];
228 sw->ports[6].config.type = TB_TYPE_PORT;
229 sw->ports[6].config.max_in_hop_id = 19;
230 sw->ports[6].config.max_out_hop_id = 19;
231 sw->ports[6].total_credits = 60;
232 sw->ports[6].ctl_credits = 2;
233 sw->ports[6].dual_link_port = &sw->ports[5];
234 sw->ports[6].link_nr = 1;
/* Dual-link pair 7+8. */
236 sw->ports[7].config.type = TB_TYPE_PORT;
237 sw->ports[7].config.max_in_hop_id = 19;
238 sw->ports[7].config.max_out_hop_id = 19;
239 sw->ports[7].total_credits = 60;
240 sw->ports[7].ctl_credits = 2;
241 sw->ports[7].dual_link_port = &sw->ports[8];
243 sw->ports[8].config.type = TB_TYPE_PORT;
244 sw->ports[8].config.max_in_hop_id = 19;
245 sw->ports[8].config.max_out_hop_id = 19;
246 sw->ports[8].total_credits = 60;
247 sw->ports[8].ctl_credits = 2;
248 sw->ports[8].dual_link_port = &sw->ports[7];
249 sw->ports[8].link_nr = 1;
251 sw->ports[9].config.type = TB_TYPE_PCIE_UP;
252 sw->ports[9].config.max_in_hop_id = 8;
253 sw->ports[9].config.max_out_hop_id = 8;
255 sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
256 sw->ports[10].config.max_in_hop_id = 8;
257 sw->ports[10].config.max_out_hop_id = 8;
259 sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
260 sw->ports[11].config.max_in_hop_id = 8;
261 sw->ports[11].config.max_out_hop_id = 8;
263 sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
264 sw->ports[12].config.max_in_hop_id = 8;
265 sw->ports[12].config.max_out_hop_id = 8;
267 sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
268 sw->ports[13].config.max_in_hop_id = 9;
269 sw->ports[13].config.max_out_hop_id = 9;
270 sw->ports[13].cap_adap = -1;
272 sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
273 sw->ports[14].config.max_in_hop_id = 9;
274 sw->ports[14].config.max_out_hop_id = 9;
275 sw->ports[14].cap_adap = -1;
277 sw->ports[15].disabled = true;
279 sw->ports[16].config.type = TB_TYPE_USB3_UP;
280 sw->ports[16].config.max_in_hop_id = 8;
281 sw->ports[16].config.max_out_hop_id = 8;
283 sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
284 sw->ports[17].config.max_in_hop_id = 8;
285 sw->ports[17].config.max_out_hop_id = 8;
287 sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
288 sw->ports[18].config.max_in_hop_id = 8;
289 sw->ports[18].config.max_out_hop_id = 8;
291 sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
292 sw->ports[19].config.max_in_hop_id = 8;
293 sw->ports[19].config.max_out_hop_id = 8;
/*
 * Wire this device to its parent: cross-link the primary lanes and, if
 * both sides have one, the secondary (dual-link) lanes too.
 * NOTE(review): the "if (!parent) return sw;" guard from the original
 * file is not visible in this truncated listing.
 */
299 upstream_port = tb_upstream_port(sw);
300 port = tb_port_at(route, parent);
301 port->remote = upstream_port;
302 upstream_port->remote = port;
303 if (port->dual_link_port && upstream_port->dual_link_port) {
304 port->dual_link_port->remote = upstream_port->dual_link_port;
305 upstream_port->dual_link_port->remote = port->dual_link_port;
/*
 * Bonded links: primary lane carries the doubled credit budget,
 * the secondary lane carries none.
 */
308 /* Bonding is used */
310 port->total_credits *= 2;
311 port->dual_link_port->bonded = true;
312 port->dual_link_port->total_credits = 0;
313 upstream_port->bonded = true;
314 upstream_port->total_credits *= 2;
315 upstream_port->dual_link_port->bonded = true;
316 upstream_port->dual_link_port->total_credits = 0;
/*
 * Like alloc_dev_default() but ports 13 and 14 are repurposed as DP IN
 * adapters instead of DP OUT, for tests that need a DP source on a device
 * router.  NOTE(review): truncated listing — NULL check and return not
 * visible here.
 */
323 static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
324 struct tb_switch *parent,
325 u64 route, bool bonded)
327 struct tb_switch *sw;
329 sw = alloc_dev_default(test, parent, route, bonded);
333 sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
334 sw->ports[13].config.max_in_hop_id = 9;
335 sw->ports[13].config.max_out_hop_id = 9;
337 sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
338 sw->ports[14].config.max_in_hop_id = 9;
339 sw->ports[14].config.max_out_hop_id = 9;
/*
 * Like alloc_dev_default() but the device advertises USB4-style
 * per-protocol credit allocation parameters (cf. alloc_host_usb4()).
 * NOTE(review): truncated listing — NULL check and return not visible.
 */
344 static struct tb_switch *alloc_dev_usb4(struct kunit *test,
345 struct tb_switch *parent,
346 u64 route, bool bonded)
348 struct tb_switch *sw;
350 sw = alloc_dev_default(test, parent, route, bonded);
355 sw->credit_allocation = true;
356 sw->max_usb3_credits = 14;
357 sw->min_dp_aux_credits = 1;
358 sw->min_dp_main_credits = 18;
359 sw->max_pcie_credits = 32;
360 sw->max_dma_credits = 14;
/*
 * Degenerate walk where source and destination are the same port on the
 * host: the walk yields that one port and then terminates.
 * NOTE(review): truncated listing — the "dst_port = src_port;" assignment
 * is presumably in the lines not shown; confirm against the full file.
 */
365 static void tb_test_path_basic(struct kunit *test)
367 struct tb_port *src_port, *dst_port, *p;
368 struct tb_switch *host;
370 host = alloc_host(test);
372 src_port = &host->ports[5];
375 p = tb_next_port_on_path(src_port, dst_port, NULL);
376 KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
/* Second step: path is exhausted, walker must return NULL. */
378 p = tb_next_port_on_path(src_port, dst_port, p);
379 KUNIT_EXPECT_TRUE(test, !p);
/*
 * Walk between ports on two routers that are NOT connected to each other:
 * the walk proceeds up to the (dangling) upstream lane of the starting
 * router and then ends without ever reaching the destination.
 */
382 static void tb_test_path_not_connected_walk(struct kunit *test)
384 struct tb_port *src_port, *dst_port, *p;
385 struct tb_switch *host, *dev;
387 host = alloc_host(test);
388 /* No connection between host and dev */
389 dev = alloc_dev_default(test, NULL, 3, true);
391 src_port = &host->ports[12];
392 dst_port = &dev->ports[16];
/* From host side: stops at host lane adapter 3, never reaches dev. */
394 p = tb_next_port_on_path(src_port, dst_port, NULL);
395 KUNIT_EXPECT_PTR_EQ(test, p, src_port);
397 p = tb_next_port_on_path(src_port, dst_port, p);
398 KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);
400 p = tb_next_port_on_path(src_port, dst_port, p);
401 KUNIT_EXPECT_TRUE(test, !p);
403 /* Other direction */
/* From dev side: stops at dev upstream lane 1, never reaches host. */
405 p = tb_next_port_on_path(dst_port, src_port, NULL);
406 KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
408 p = tb_next_port_on_path(dst_port, src_port, p);
409 KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);
411 p = tb_next_port_on_path(dst_port, src_port, p);
412 KUNIT_EXPECT_TRUE(test, !p);
/*
 * Expected port seen at one step of a path walk.  NOTE(review): truncated
 * listing — the .route (u64) and .port (u8) members used by the test_data
 * initializers below are not visible here; confirm against the full file.
 */
415 struct port_expectation {
418 enum tb_port_type type;
/*
 * Walk a single-hop path (host PCIe down -> device 1 PCIe up) in both
 * directions, checking route/port/type at each step against test_data.
 * NOTE(review): truncated listing — the "i = 0;" initializer and the
 * "i++;" / "i--;" statements inside the loops are not visible here.
 */
421 static void tb_test_path_single_hop_walk(struct kunit *test)
424 * Walks from Host PCIe downstream port to Device #1 PCIe
432 static const struct port_expectation test_data[] = {
433 { .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
434 { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
435 { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
436 { .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
438 struct tb_port *src_port, *dst_port, *p;
439 struct tb_switch *host, *dev;
442 host = alloc_host(test);
443 dev = alloc_dev_default(test, host, 1, true);
445 src_port = &host->ports[8];
446 dst_port = &dev->ports[9];
448 /* Walk both directions */
/* Forward: indices run 0..ARRAY_SIZE-1. */
451 tb_for_each_port_on_path(src_port, dst_port, p) {
452 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
453 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
454 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
455 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
460 KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
/* Reverse: same expectations consumed from the end down to -1. */
462 i = ARRAY_SIZE(test_data) - 1;
463 tb_for_each_port_on_path(dst_port, src_port, p) {
464 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
465 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
466 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
467 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
472 KUNIT_EXPECT_EQ(test, i, -1);
/*
 * Walk a two-hop daisy chain (host DP IN -> dev1 -> dev2 DP OUT) in both
 * directions, checking route/port/type at each step against test_data.
 * NOTE(review): truncated listing — loop index init/increment/decrement
 * statements are not visible here.
 */
475 static void tb_test_path_daisy_chain_walk(struct kunit *test)
478 * Walks from Host DP IN to Device #2 DP OUT.
488 static const struct port_expectation test_data[] = {
489 { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
490 { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
491 { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
492 { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
493 { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
494 { .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
496 struct tb_port *src_port, *dst_port, *p;
497 struct tb_switch *host, *dev1, *dev2;
500 host = alloc_host(test);
501 dev1 = alloc_dev_default(test, host, 0x1, true);
502 dev2 = alloc_dev_default(test, dev1, 0x301, true);
504 src_port = &host->ports[5];
505 dst_port = &dev2->ports[13];
507 /* Walk both directions */
510 tb_for_each_port_on_path(src_port, dst_port, p) {
511 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
512 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
513 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
514 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
519 KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
/* Reverse walk consumes the same table from the end down to -1. */
521 i = ARRAY_SIZE(test_data) - 1;
522 tb_for_each_port_on_path(dst_port, src_port, p) {
523 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
524 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
525 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
526 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
531 KUNIT_EXPECT_EQ(test, i, -1);
/*
 * Walk through a small tree: dev1 hangs off the host and has three
 * children (0x301, 0x501, 0x701); the path goes host DP IN -> dev1 ->
 * dev3 (route 0x501) DP OUT, so the walk must pick the correct branch.
 * NOTE(review): truncated listing — loop index statements not visible.
 */
534 static void tb_test_path_simple_tree_walk(struct kunit *test)
537 * Walks from Host DP IN to Device #3 DP OUT.
545 * [Device #2] | [Device #4]
549 static const struct port_expectation test_data[] = {
550 { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
551 { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
552 { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
553 { .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
554 { .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
555 { .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
557 struct tb_port *src_port, *dst_port, *p;
558 struct tb_switch *host, *dev1, *dev3;
561 host = alloc_host(test);
562 dev1 = alloc_dev_default(test, host, 0x1, true);
/* Siblings at 0x301 and 0x701 exist only to give dev1 extra branches. */
563 alloc_dev_default(test, dev1, 0x301, true);
564 dev3 = alloc_dev_default(test, dev1, 0x501, true);
565 alloc_dev_default(test, dev1, 0x701, true);
567 src_port = &host->ports[5];
568 dst_port = &dev3->ports[13];
570 /* Walk both directions */
573 tb_for_each_port_on_path(src_port, dst_port, p) {
574 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
575 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
576 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
577 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
582 KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
/* Reverse walk consumes the same table from the end down to -1. */
584 i = ARRAY_SIZE(test_data) - 1;
585 tb_for_each_port_on_path(dst_port, src_port, p) {
586 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
587 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
588 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
589 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
594 KUNIT_EXPECT_EQ(test, i, -1);
/*
 * Walk a path that crosses the tree root: from a DP IN on device 3 (a
 * leaf under one branch, via alloc_dev_with_dpin) up through dev2/dev1,
 * then down the other branch dev5 -> dev6 -> dev7 -> dev9 to a DP OUT.
 * Exercises the walker's up-then-down traversal across a common ancestor.
 * NOTE(review): truncated listing — loop index statements not visible.
 */
597 static void tb_test_path_complex_tree_walk(struct kunit *test)
600 * Walks from Device #3 DP IN to Device #9 DP OUT.
608 * [Device #2] | [Device #5]
610 * 1 | [Device #4] \ 1
611 * [Device #3] [Device #6]
620 static const struct port_expectation test_data[] = {
621 { .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
622 { .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
623 { .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
624 { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
625 { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
626 { .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
627 { .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
628 { .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
629 { .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
630 { .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
631 { .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
632 { .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
633 { .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
634 { .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
636 struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
637 struct tb_port *src_port, *dst_port, *p;
640 host = alloc_host(test);
641 dev1 = alloc_dev_default(test, host, 0x1, true);
642 dev2 = alloc_dev_default(test, dev1, 0x301, true);
/* dev3 needs DP IN adapters, hence alloc_dev_with_dpin(). */
643 dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
644 alloc_dev_default(test, dev1, 0x501, true);
645 dev5 = alloc_dev_default(test, dev1, 0x701, true);
646 dev6 = alloc_dev_default(test, dev5, 0x70701, true);
647 dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
648 alloc_dev_default(test, dev7, 0x303070701, true);
649 dev9 = alloc_dev_default(test, dev7, 0x503070701, true);
651 src_port = &dev3->ports[13];
652 dst_port = &dev9->ports[14];
654 /* Walk both directions */
657 tb_for_each_port_on_path(src_port, dst_port, p) {
658 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
659 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
660 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
661 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
666 KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
/* Reverse walk consumes the same table from the end down to -1. */
668 i = ARRAY_SIZE(test_data) - 1;
669 tb_for_each_port_on_path(dst_port, src_port, p) {
670 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
671 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
672 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
673 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
678 KUNIT_EXPECT_EQ(test, i, -1);
/*
 * Walk the longest supported topology: two six-deep chains hanging off
 * the host, path from a DP IN at the bottom of one chain all the way up
 * through the host and down to a DP OUT at the bottom of the other.
 * NOTE(review): truncated listing — loop index statements not visible.
 */
681 static void tb_test_path_max_length_walk(struct kunit *test)
683 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
684 struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
685 struct tb_port *src_port, *dst_port, *p;
689 * Walks from Device #6 DP IN to Device #12 DP OUT.
694 * [Device #1] [Device #7]
697 * [Device #2] [Device #8]
700 * [Device #3] [Device #9]
703 * [Device #4] [Device #10]
706 * [Device #5] [Device #11]
709 * [Device #6] [Device #12]
711 static const struct port_expectation test_data[] = {
712 { .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
713 { .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
714 { .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
715 { .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
716 { .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
717 { .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
718 { .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
719 { .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
720 { .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
721 { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
722 { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
723 { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
724 { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
725 { .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
726 { .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
727 { .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
728 { .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
729 { .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
730 { .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
731 { .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
732 { .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
733 { .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
734 { .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
735 { .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
736 { .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
737 { .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
/* First chain: host -> dev1..dev5, with dev6 providing the DP IN leaf. */
740 host = alloc_host(test);
741 dev1 = alloc_dev_default(test, host, 0x1, true);
742 dev2 = alloc_dev_default(test, dev1, 0x301, true);
743 dev3 = alloc_dev_default(test, dev2, 0x30301, true);
744 dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
745 dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
746 dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
/* Second chain: host -> dev7..dev12, DP OUT leaf at dev12. */
747 dev7 = alloc_dev_default(test, host, 0x3, true);
748 dev8 = alloc_dev_default(test, dev7, 0x303, true);
749 dev9 = alloc_dev_default(test, dev8, 0x30303, true);
750 dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
751 dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
752 dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
754 src_port = &dev6->ports[13];
755 dst_port = &dev12->ports[13];
757 /* Walk both directions */
760 tb_for_each_port_on_path(src_port, dst_port, p) {
761 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
762 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
763 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
764 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
769 KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
/* Reverse walk consumes the same table from the end down to -1. */
771 i = ARRAY_SIZE(test_data) - 1;
772 tb_for_each_port_on_path(dst_port, src_port, p) {
773 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
774 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
775 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
776 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
781 KUNIT_EXPECT_EQ(test, i, -1);
/*
 * tb_path_alloc() must fail (return NULL) when the endpoints are on
 * routers with no physical connection between them, for both lane 0 and
 * lane 1 paths.
 */
784 static void tb_test_path_not_connected(struct kunit *test)
786 struct tb_switch *host, *dev1, *dev2;
787 struct tb_port *down, *up;
788 struct tb_path *path;
790 host = alloc_host(test);
791 dev1 = alloc_dev_default(test, host, 0x3, false);
792 /* Not connected to anything */
793 dev2 = alloc_dev_default(test, NULL, 0x303, false);
795 down = &dev1->ports[10];
796 up = &dev2->ports[9];
/* Both link parameters (0 and 1) must yield no path. */
798 path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
799 KUNIT_ASSERT_TRUE(test, path == NULL);
800 path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
801 KUNIT_ASSERT_TRUE(test, path == NULL);
/*
 * Expected in/out adapters of one hop of an allocated tb_path.
 * NOTE(review): truncated listing — the .route (u64), .in_port and
 * .out_port (u8) members used by the test_data initializers below are
 * not visible here; confirm against the full file.
 */
804 struct hop_expectation {
807 enum tb_port_type in_type;
809 enum tb_port_type out_type;
/*
 * Allocate a two-hop PCIe path over unbonded links using lane 0 and
 * verify each hop's in/out adapter route, number and type.
 * NOTE(review): truncated listing — the .route/.in_port/.out_port
 * initializers of test_data, the "up = &dev->ports[9];" style assignment,
 * the loop index declaration and tb_path_free() are not visible here.
 */
812 static void tb_test_path_not_bonded_lane0(struct kunit *test)
815 * PCIe path from host to device using lane 0.
822 static const struct hop_expectation test_data[] = {
826 .in_type = TB_TYPE_PCIE_DOWN,
828 .out_type = TB_TYPE_PORT,
833 .in_type = TB_TYPE_PORT,
835 .out_type = TB_TYPE_PCIE_UP,
838 struct tb_switch *host, *dev;
839 struct tb_port *down, *up;
840 struct tb_path *path;
/* bonded = false: the link stays unbonded so lane 0 is used alone. */
843 host = alloc_host(test);
844 dev = alloc_dev_default(test, host, 0x3, false);
846 down = &host->ports[9];
849 path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
850 KUNIT_ASSERT_TRUE(test, path != NULL);
851 KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
852 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
853 const struct tb_port *in_port, *out_port;
855 in_port = path->hops[i].in_port;
856 out_port = path->hops[i].out_port;
858 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
859 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
860 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
861 test_data[i].in_type);
862 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
863 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
864 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
865 test_data[i].out_type);
/*
 * Allocate a DP video path over an unbonded link using lane 1 (legacy
 * Thunderbolt 1 style) and verify each hop's in/out adapters.
 * NOTE(review): truncated listing — the .route/.in_port/.out_port
 * initializers of test_data, the loop index declaration and
 * tb_path_free() are not visible here.
 */
870 static void tb_test_path_not_bonded_lane1(struct kunit *test)
873 * DP Video path from host to device using lane 1. Paths like
874 * these are only used with Thunderbolt 1 devices where lane
875 * bonding is not possible. USB4 specifically does not allow
876 * paths like this (you either use lane 0 where lane 1 is
877 * disabled or both lanes are bonded).
884 static const struct hop_expectation test_data[] = {
888 .in_type = TB_TYPE_DP_HDMI_IN,
890 .out_type = TB_TYPE_PORT,
895 .in_type = TB_TYPE_PORT,
897 .out_type = TB_TYPE_DP_HDMI_OUT,
900 struct tb_switch *host, *dev;
901 struct tb_port *in, *out;
902 struct tb_path *path;
903 host = alloc_host(test);
906 dev = alloc_dev_default(test, host, 0x1, false);
908 in = &host->ports[5];
909 out = &dev->ports[13];
/* link = 1 selects the secondary (lane 1) hop of the unbonded link. */
911 path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
912 KUNIT_ASSERT_TRUE(test, path != NULL);
913 KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
914 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
915 const struct tb_port *in_port, *out_port;
917 in_port = path->hops[i].in_port;
918 out_port = path->hops[i].out_port;
920 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
921 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
922 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
923 test_data[i].in_type);
924 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
925 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
926 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
927 test_data[i].out_type);
/*
 * Allocate a lane-1 DP video path across a chain of three unbonded
 * devices (host -> dev1 -> dev2 -> dev3) and verify all four hops.
 * NOTE(review): truncated listing — the .route/.in_port/.out_port
 * initializers of test_data, the loop index declaration and
 * tb_path_free() are not visible here.
 */
932 static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
935 * DP Video path from host to device 3 using lane 1.
948 static const struct hop_expectation test_data[] = {
952 .in_type = TB_TYPE_DP_HDMI_IN,
954 .out_type = TB_TYPE_PORT,
959 .in_type = TB_TYPE_PORT,
961 .out_type = TB_TYPE_PORT,
966 .in_type = TB_TYPE_PORT,
968 .out_type = TB_TYPE_PORT,
973 .in_type = TB_TYPE_PORT,
975 .out_type = TB_TYPE_DP_HDMI_OUT,
978 struct tb_switch *host, *dev1, *dev2, *dev3;
979 struct tb_port *in, *out;
980 struct tb_path *path;
/* All links unbonded (bonded = false) so lane 1 remains usable. */
983 host = alloc_host(test);
984 dev1 = alloc_dev_default(test, host, 0x1, false);
985 dev2 = alloc_dev_default(test, dev1, 0x701, false);
986 dev3 = alloc_dev_default(test, dev2, 0x50701, false);
988 in = &host->ports[5];
989 out = &dev3->ports[13];
991 path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
992 KUNIT_ASSERT_TRUE(test, path != NULL);
993 KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
994 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
995 const struct tb_port *in_port, *out_port;
997 in_port = path->hops[i].in_port;
998 out_port = path->hops[i].out_port;
1000 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1001 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1002 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1003 test_data[i].in_type);
1004 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1005 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1006 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1007 test_data[i].out_type);
/*
 * Mirror of tb_test_path_not_bonded_lane1_chain(): the lane-1 DP video
 * path runs from dev3 (which has DP IN adapters, via
 * alloc_dev_with_dpin) back up to the host.
 * NOTE(review): truncated listing — the .route/.in_port/.out_port
 * initializers of test_data, the loop index declaration and
 * tb_path_free() are not visible here.
 */
1012 static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
1015 * DP Video path from device 3 to host using lane 1.
1028 static const struct hop_expectation test_data[] = {
1032 .in_type = TB_TYPE_DP_HDMI_IN,
1034 .out_type = TB_TYPE_PORT,
1039 .in_type = TB_TYPE_PORT,
1041 .out_type = TB_TYPE_PORT,
1046 .in_type = TB_TYPE_PORT,
1048 .out_type = TB_TYPE_PORT,
1053 .in_type = TB_TYPE_PORT,
1055 .out_type = TB_TYPE_DP_HDMI_IN,
1058 struct tb_switch *host, *dev1, *dev2, *dev3;
1059 struct tb_port *in, *out;
1060 struct tb_path *path;
1063 host = alloc_host(test);
1064 dev1 = alloc_dev_default(test, host, 0x1, false);
1065 dev2 = alloc_dev_default(test, dev1, 0x701, false);
/* dev3 sources the video, so it needs DP IN adapters. */
1066 dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);
1068 in = &dev3->ports[13];
1069 out = &host->ports[5];
1071 path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1072 KUNIT_ASSERT_TRUE(test, path != NULL);
1073 KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
1074 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1075 const struct tb_port *in_port, *out_port;
1077 in_port = path->hops[i].in_port;
1078 out_port = path->hops[i].out_port;
1080 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1081 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1082 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1083 test_data[i].in_type);
1084 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1085 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1086 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1087 test_data[i].out_type);
/*
 * Allocate a lane-1 DP video path across a chain where the first
 * (host->dev1) and last (dev3->dev4) links are bonded but the middle
 * links are not — the path allocator must mix bonded and unbonded hops.
 * NOTE(review): truncated listing — the .route/.in_port/.out_port
 * initializers of test_data, the loop index declaration and
 * tb_path_free() are not visible here.
 */
1092 static void tb_test_path_mixed_chain(struct kunit *test)
1095 * DP Video path from host to device 4 where first and last link
1112 static const struct hop_expectation test_data[] = {
1116 .in_type = TB_TYPE_DP_HDMI_IN,
1118 .out_type = TB_TYPE_PORT,
1123 .in_type = TB_TYPE_PORT,
1125 .out_type = TB_TYPE_PORT,
1130 .in_type = TB_TYPE_PORT,
1132 .out_type = TB_TYPE_PORT,
1137 .in_type = TB_TYPE_PORT,
1139 .out_type = TB_TYPE_PORT,
1144 .in_type = TB_TYPE_PORT,
1146 .out_type = TB_TYPE_DP_HDMI_OUT,
1149 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
1150 struct tb_port *in, *out;
1151 struct tb_path *path;
/* bonded/unbonded mix: ends bonded (true), middle links unbonded. */
1154 host = alloc_host(test);
1155 dev1 = alloc_dev_default(test, host, 0x1, true);
1156 dev2 = alloc_dev_default(test, dev1, 0x701, false);
1157 dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1158 dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
1160 in = &host->ports[5];
1161 out = &dev4->ports[13];
1163 path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1164 KUNIT_ASSERT_TRUE(test, path != NULL);
1165 KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
1166 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1167 const struct tb_port *in_port, *out_port;
1169 in_port = path->hops[i].in_port;
1170 out_port = path->hops[i].out_port;
1172 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1173 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1174 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1175 test_data[i].in_type);
1176 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1177 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1178 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1179 test_data[i].out_type);
/*
 * Mirror of tb_test_path_mixed_chain(): same bonded/unbonded topology
 * but the lane-1 DP video path runs from dev4's DP OUT adapter back up
 * to the host's DP IN adapter.
 * NOTE(review): truncated listing — the .route/.in_port/.out_port
 * initializers of test_data, the loop index declaration and
 * tb_path_free() are not visible here.
 */
1184 static void tb_test_path_mixed_chain_reverse(struct kunit *test)
1187 * DP Video path from device 4 to host where first and last link
1204 static const struct hop_expectation test_data[] = {
1208 .in_type = TB_TYPE_DP_HDMI_OUT,
1210 .out_type = TB_TYPE_PORT,
1215 .in_type = TB_TYPE_PORT,
1217 .out_type = TB_TYPE_PORT,
1222 .in_type = TB_TYPE_PORT,
1224 .out_type = TB_TYPE_PORT,
1229 .in_type = TB_TYPE_PORT,
1231 .out_type = TB_TYPE_PORT,
1236 .in_type = TB_TYPE_PORT,
1238 .out_type = TB_TYPE_DP_HDMI_IN,
1241 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
1242 struct tb_port *in, *out;
1243 struct tb_path *path;
/* Same mixed topology as the forward variant. */
1246 host = alloc_host(test);
1247 dev1 = alloc_dev_default(test, host, 0x1, true);
1248 dev2 = alloc_dev_default(test, dev1, 0x701, false);
1249 dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1250 dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
1252 in = &dev4->ports[13];
1253 out = &host->ports[5];
1255 path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1256 KUNIT_ASSERT_TRUE(test, path != NULL);
1257 KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
1258 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1259 const struct tb_port *in_port, *out_port;
1261 in_port = path->hops[i].in_port;
1262 out_port = path->hops[i].out_port;
1264 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1265 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1266 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1267 test_data[i].in_type);
1268 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1269 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1270 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1271 test_data[i].out_type);
1276 static void tb_test_tunnel_pcie(struct kunit *test)
1278 struct tb_switch *host, *dev1, *dev2;
1279 struct tb_tunnel *tunnel1, *tunnel2;
1280 struct tb_port *down, *up;
1283 * Create PCIe tunnel between host and two devices.
1293 host = alloc_host(test);
1294 dev1 = alloc_dev_default(test, host, 0x1, true);
1295 dev2 = alloc_dev_default(test, dev1, 0x501, true);
1297 down = &host->ports[8];
1298 up = &dev1->ports[9];
1299 tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
1300 KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
1301 KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI);
1302 KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
1303 KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
1304 KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
1305 KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
1306 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
1307 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
1308 KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
1309 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
1310 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
1312 down = &dev1->ports[10];
1313 up = &dev2->ports[9];
1314 tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
1315 KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
1316 KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI);
1317 KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
1318 KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
1319 KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
1320 KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
1321 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
1322 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
1323 KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
1324 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
1325 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
1327 tb_tunnel_free(tunnel2);
1328 tb_tunnel_free(tunnel1);
1331 static void tb_test_tunnel_dp(struct kunit *test)
1333 struct tb_switch *host, *dev;
1334 struct tb_port *in, *out;
1335 struct tb_tunnel *tunnel;
1338 * Create DP tunnel between Host and Device
1345 host = alloc_host(test);
1346 dev = alloc_dev_default(test, host, 0x3, true);
1348 in = &host->ports[5];
1349 out = &dev->ports[13];
1351 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1352 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1353 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1354 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1355 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1356 KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
1357 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
1358 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1359 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
1360 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
1361 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1362 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
1363 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
1364 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1365 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
1366 tb_tunnel_free(tunnel);
1369 static void tb_test_tunnel_dp_chain(struct kunit *test)
1371 struct tb_switch *host, *dev1, *dev4;
1372 struct tb_port *in, *out;
1373 struct tb_tunnel *tunnel;
1376 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
1384 * [Device #2] | [Device #4]
1388 host = alloc_host(test);
1389 dev1 = alloc_dev_default(test, host, 0x1, true);
1390 alloc_dev_default(test, dev1, 0x301, true);
1391 alloc_dev_default(test, dev1, 0x501, true);
1392 dev4 = alloc_dev_default(test, dev1, 0x701, true);
1394 in = &host->ports[5];
1395 out = &dev4->ports[14];
1397 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1398 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1399 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1400 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1401 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1402 KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
1403 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
1404 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1405 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
1406 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
1407 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1408 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
1409 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
1410 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1411 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
1412 tb_tunnel_free(tunnel);
1415 static void tb_test_tunnel_dp_tree(struct kunit *test)
1417 struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
1418 struct tb_port *in, *out;
1419 struct tb_tunnel *tunnel;
1422 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
1430 * [Device #2] | [Device #4]
1437 host = alloc_host(test);
1438 dev1 = alloc_dev_default(test, host, 0x3, true);
1439 dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
1440 dev3 = alloc_dev_default(test, dev1, 0x503, true);
1441 alloc_dev_default(test, dev1, 0x703, true);
1442 dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1444 in = &dev2->ports[13];
1445 out = &dev5->ports[13];
1447 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1448 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1449 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1450 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1451 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1452 KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
1453 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
1454 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1455 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
1456 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
1457 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1458 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
1459 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
1460 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1461 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
1462 tb_tunnel_free(tunnel);
1465 static void tb_test_tunnel_dp_max_length(struct kunit *test)
1467 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
1468 struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
1469 struct tb_port *in, *out;
1470 struct tb_tunnel *tunnel;
1473 * Creates DP tunnel from Device #6 to Device #12.
1478 * [Device #1] [Device #7]
1481 * [Device #2] [Device #8]
1484 * [Device #3] [Device #9]
1487 * [Device #4] [Device #10]
1490 * [Device #5] [Device #11]
1493 * [Device #6] [Device #12]
1495 host = alloc_host(test);
1496 dev1 = alloc_dev_default(test, host, 0x1, true);
1497 dev2 = alloc_dev_default(test, dev1, 0x301, true);
1498 dev3 = alloc_dev_default(test, dev2, 0x30301, true);
1499 dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
1500 dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
1501 dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
1502 dev7 = alloc_dev_default(test, host, 0x3, true);
1503 dev8 = alloc_dev_default(test, dev7, 0x303, true);
1504 dev9 = alloc_dev_default(test, dev8, 0x30303, true);
1505 dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
1506 dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
1507 dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
1509 in = &dev6->ports[13];
1510 out = &dev12->ports[13];
1512 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1513 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1514 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1515 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1516 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1517 KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
1518 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
1520 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1522 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
1524 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
1527 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
1528 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
1529 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1530 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
1532 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
1534 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
1535 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
1536 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1537 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
1539 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
1541 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
1542 tb_tunnel_free(tunnel);
1545 static void tb_test_tunnel_usb3(struct kunit *test)
1547 struct tb_switch *host, *dev1, *dev2;
1548 struct tb_tunnel *tunnel1, *tunnel2;
1549 struct tb_port *down, *up;
1552 * Create USB3 tunnel between host and two devices.
1562 host = alloc_host(test);
1563 dev1 = alloc_dev_default(test, host, 0x1, true);
1564 dev2 = alloc_dev_default(test, dev1, 0x701, true);
1566 down = &host->ports[12];
1567 up = &dev1->ports[16];
1568 tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1569 KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
1570 KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3);
1571 KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
1572 KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
1573 KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
1574 KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
1575 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
1576 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
1577 KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
1578 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
1579 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
1581 down = &dev1->ports[17];
1582 up = &dev2->ports[16];
1583 tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1584 KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
1585 KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3);
1586 KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
1587 KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
1588 KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
1589 KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
1590 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
1591 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
1592 KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
1593 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
1594 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
1596 tb_tunnel_free(tunnel2);
1597 tb_tunnel_free(tunnel1);
1600 static void tb_test_tunnel_port_on_path(struct kunit *test)
1602 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
1603 struct tb_port *in, *out, *port;
1604 struct tb_tunnel *dp_tunnel;
1613 * [Device #2] | [Device #4]
1620 host = alloc_host(test);
1621 dev1 = alloc_dev_default(test, host, 0x3, true);
1622 dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
1623 dev3 = alloc_dev_default(test, dev1, 0x503, true);
1624 dev4 = alloc_dev_default(test, dev1, 0x703, true);
1625 dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1627 in = &dev2->ports[13];
1628 out = &dev5->ports[13];
1630 dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1631 KUNIT_ASSERT_TRUE(test, dp_tunnel != NULL);
1633 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
1634 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));
1636 port = &host->ports[8];
1637 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1639 port = &host->ports[3];
1640 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1642 port = &dev1->ports[1];
1643 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1645 port = &dev1->ports[3];
1646 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1648 port = &dev1->ports[5];
1649 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1651 port = &dev1->ports[7];
1652 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1654 port = &dev3->ports[1];
1655 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1657 port = &dev5->ports[1];
1658 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1660 port = &dev4->ports[1];
1661 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1663 tb_tunnel_free(dp_tunnel);
1666 static void tb_test_tunnel_dma(struct kunit *test)
1668 struct tb_port *nhi, *port;
1669 struct tb_tunnel *tunnel;
1670 struct tb_switch *host;
1673 * Create DMA tunnel from NHI to port 1 and back.
1676 * 1 ^ In HopID 1 -> Out HopID 8
1678 * v In HopID 8 -> Out HopID 1
1679 * ............ Domain border
1683 host = alloc_host(test);
1684 nhi = &host->ports[7];
1685 port = &host->ports[1];
1687 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
1688 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1689 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1690 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1691 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1692 KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
1694 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1695 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1696 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
1697 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
1698 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
1700 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
1701 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
1702 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
1703 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
1704 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);
1706 tb_tunnel_free(tunnel);
1709 static void tb_test_tunnel_dma_rx(struct kunit *test)
1711 struct tb_port *nhi, *port;
1712 struct tb_tunnel *tunnel;
1713 struct tb_switch *host;
1716 * Create DMA RX tunnel from port 1 to NHI.
1721 * | In HopID 15 -> Out HopID 2
1722 * ............ Domain border
1726 host = alloc_host(test);
1727 nhi = &host->ports[7];
1728 port = &host->ports[1];
1730 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
1731 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1732 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1733 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1734 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1735 KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
1737 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1738 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1739 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
1740 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
1741 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);
1743 tb_tunnel_free(tunnel);
1746 static void tb_test_tunnel_dma_tx(struct kunit *test)
1748 struct tb_port *nhi, *port;
1749 struct tb_tunnel *tunnel;
1750 struct tb_switch *host;
1753 * Create DMA TX tunnel from NHI to port 1.
1756 * 1 | In HopID 2 -> Out HopID 15
1759 * ............ Domain border
1763 host = alloc_host(test);
1764 nhi = &host->ports[7];
1765 port = &host->ports[1];
1767 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
1768 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1769 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1770 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1771 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1772 KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
1774 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1775 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
1776 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
1777 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
1778 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);
1780 tb_tunnel_free(tunnel);
1783 static void tb_test_tunnel_dma_chain(struct kunit *test)
1785 struct tb_switch *host, *dev1, *dev2;
1786 struct tb_port *nhi, *port;
1787 struct tb_tunnel *tunnel;
1790 * Create DMA tunnel from NHI to Device #2 port 3 and back.
1793 * 1 ^ In HopID 1 -> Out HopID x
1795 * 1 | In HopID x -> Out HopID 1
1800 * 3 | In HopID x -> Out HopID 8
1802 * v In HopID 8 -> Out HopID x
1803 * ............ Domain border
1807 host = alloc_host(test);
1808 dev1 = alloc_dev_default(test, host, 0x1, true);
1809 dev2 = alloc_dev_default(test, dev1, 0x701, true);
1811 nhi = &host->ports[7];
1812 port = &dev2->ports[3];
1813 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
1814 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1815 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1816 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1817 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1818 KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
1820 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
1821 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1822 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
1823 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
1825 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
1827 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
1829 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
1831 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
1832 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
1834 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
1835 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
1836 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
1837 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
1839 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
1841 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
1843 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
1844 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
1846 tb_tunnel_free(tunnel);
1849 static void tb_test_tunnel_dma_match(struct kunit *test)
1851 struct tb_port *nhi, *port;
1852 struct tb_tunnel *tunnel;
1853 struct tb_switch *host;
1855 host = alloc_host(test);
1856 nhi = &host->ports[7];
1857 port = &host->ports[1];
1859 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
1860 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1862 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
1863 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
1864 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1865 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
1866 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
1867 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
1868 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
1869 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
1870 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1871 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));
1873 tb_tunnel_free(tunnel);
1875 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
1876 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1877 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
1878 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
1879 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
1880 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1881 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
1882 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1883 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
1885 tb_tunnel_free(tunnel);
1887 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
1888 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1889 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
1890 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
1891 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
1892 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1893 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1894 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
1895 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
1897 tb_tunnel_free(tunnel);
1900 static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
1902 struct tb_switch *host, *dev;
1903 struct tb_port *up, *down;
1904 struct tb_tunnel *tunnel;
1905 struct tb_path *path;
1907 host = alloc_host(test);
1908 dev = alloc_dev_default(test, host, 0x1, false);
1910 down = &host->ports[8];
1911 up = &dev->ports[9];
1912 tunnel = tb_tunnel_alloc_pci(NULL, up, down);
1913 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1914 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
1916 path = tunnel->paths[0];
1917 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1918 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1919 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1920 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1921 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
1923 path = tunnel->paths[1];
1924 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1925 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1926 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1927 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1928 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
1930 tb_tunnel_free(tunnel);
1933 static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
1935 struct tb_switch *host, *dev;
1936 struct tb_port *up, *down;
1937 struct tb_tunnel *tunnel;
1938 struct tb_path *path;
1940 host = alloc_host(test);
1941 dev = alloc_dev_default(test, host, 0x1, true);
1943 down = &host->ports[8];
1944 up = &dev->ports[9];
1945 tunnel = tb_tunnel_alloc_pci(NULL, up, down);
1946 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1947 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
1949 path = tunnel->paths[0];
1950 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1951 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1952 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1953 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1954 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
1956 path = tunnel->paths[1];
1957 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1958 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1959 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1960 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1961 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
1963 tb_tunnel_free(tunnel);
1966 static void tb_test_credit_alloc_pcie(struct kunit *test)
1968 struct tb_switch *host, *dev;
1969 struct tb_port *up, *down;
1970 struct tb_tunnel *tunnel;
1971 struct tb_path *path;
1973 host = alloc_host_usb4(test);
1974 dev = alloc_dev_usb4(test, host, 0x1, true);
1976 down = &host->ports[8];
1977 up = &dev->ports[9];
1978 tunnel = tb_tunnel_alloc_pci(NULL, up, down);
1979 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1980 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
1982 path = tunnel->paths[0];
1983 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1984 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1985 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1986 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1987 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
1989 path = tunnel->paths[1];
1990 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1991 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1992 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1993 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1994 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
1996 tb_tunnel_free(tunnel);
1999 static void tb_test_credit_alloc_dp(struct kunit *test)
2001 struct tb_switch *host, *dev;
2002 struct tb_port *in, *out;
2003 struct tb_tunnel *tunnel;
2004 struct tb_path *path;
2006 host = alloc_host_usb4(test);
2007 dev = alloc_dev_usb4(test, host, 0x1, true);
2009 in = &host->ports[5];
2010 out = &dev->ports[14];
2012 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
2013 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
2014 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
2016 /* Video (main) path */
2017 path = tunnel->paths[0];
2018 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2019 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2020 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2021 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2022 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2025 path = tunnel->paths[1];
2026 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2027 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2028 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2029 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2030 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2033 path = tunnel->paths[2];
2034 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2035 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2036 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2037 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2038 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2040 tb_tunnel_free(tunnel);
2043 static void tb_test_credit_alloc_usb3(struct kunit *test)
2045 struct tb_switch *host, *dev;
2046 struct tb_port *up, *down;
2047 struct tb_tunnel *tunnel;
2048 struct tb_path *path;
2050 host = alloc_host_usb4(test);
2051 dev = alloc_dev_usb4(test, host, 0x1, true);
2053 down = &host->ports[12];
2054 up = &dev->ports[16];
2055 tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
2056 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
2057 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2059 path = tunnel->paths[0];
2060 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2061 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2062 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2063 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2064 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2066 path = tunnel->paths[1];
2067 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2068 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2069 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2070 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2071 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2073 tb_tunnel_free(tunnel);
2076 static void tb_test_credit_alloc_dma(struct kunit *test)
2078 struct tb_switch *host, *dev;
2079 struct tb_port *nhi, *port;
2080 struct tb_tunnel *tunnel;
2081 struct tb_path *path;
2083 host = alloc_host_usb4(test);
2084 dev = alloc_dev_usb4(test, host, 0x1, true);
2086 nhi = &host->ports[7];
2087 port = &dev->ports[3];
2089 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2090 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
2091 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2094 path = tunnel->paths[0];
2095 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2096 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2097 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2098 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2099 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2102 path = tunnel->paths[1];
2103 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2104 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2105 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2106 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2107 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2109 tb_tunnel_free(tunnel);
2112 static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
2114 struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
2115 struct tb_switch *host, *dev;
2116 struct tb_port *nhi, *port;
2117 struct tb_path *path;
2119 host = alloc_host_usb4(test);
2120 dev = alloc_dev_usb4(test, host, 0x1, true);
2122 nhi = &host->ports[7];
2123 port = &dev->ports[3];
2126 * Create three DMA tunnels through the same ports. With the
2127 * default buffers we should be able to create two and the last
2130 * For default host we have following buffers for DMA:
2132 * 120 - (2 + 2 * (1 + 0) + 32 + 64 + spare) = 20
2134 * For device we have following:
2136 * 120 - (2 + 2 * (1 + 18) + 14 + 32 + spare) = 34
2138 * spare = 14 + 1 = 15
2140 * So on host the first tunnel gets 14 and the second gets the
2141 * remaining 1 and then we run out of buffers.
2143 tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2144 KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
2145 KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
2147 path = tunnel1->paths[0];
2148 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2149 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2150 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2151 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2152 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2154 path = tunnel1->paths[1];
2155 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2156 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2157 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2158 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2159 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2161 tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
2162 KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
2163 KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
2165 path = tunnel2->paths[0];
2166 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2167 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2168 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2169 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2170 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2172 path = tunnel2->paths[1];
2173 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2174 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2175 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2176 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2177 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2179 tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
2180 KUNIT_ASSERT_TRUE(test, tunnel3 == NULL);
2183 * Release the first DMA tunnel. That should make 14 buffers
2184 * available for the next tunnel.
2186 tb_tunnel_free(tunnel1);
2188 tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
2189 KUNIT_ASSERT_TRUE(test, tunnel3 != NULL);
2191 path = tunnel3->paths[0];
2192 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2193 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2194 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2195 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2196 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2198 path = tunnel3->paths[1];
2199 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2200 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2201 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2202 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2203 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2205 tb_tunnel_free(tunnel3);
2206 tb_tunnel_free(tunnel2);
2209 static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
2210 struct tb_switch *host, struct tb_switch *dev)
2212 struct tb_port *up, *down;
2213 struct tb_tunnel *pcie_tunnel;
2214 struct tb_path *path;
2216 down = &host->ports[8];
2217 up = &dev->ports[9];
2218 pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2219 KUNIT_ASSERT_TRUE(test, pcie_tunnel != NULL);
2220 KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2);
2222 path = pcie_tunnel->paths[0];
2223 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2224 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2225 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2226 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2227 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2229 path = pcie_tunnel->paths[1];
2230 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2231 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2232 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2233 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2234 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2239 static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
2240 struct tb_switch *host, struct tb_switch *dev)
2242 struct tb_port *in, *out;
2243 struct tb_tunnel *dp_tunnel1;
2244 struct tb_path *path;
2246 in = &host->ports[5];
2247 out = &dev->ports[13];
2248 dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
2249 KUNIT_ASSERT_TRUE(test, dp_tunnel1 != NULL);
2250 KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
2252 path = dp_tunnel1->paths[0];
2253 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2254 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2255 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2256 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2257 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2259 path = dp_tunnel1->paths[1];
2260 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2261 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2262 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2263 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2264 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2266 path = dp_tunnel1->paths[2];
2267 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2268 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2269 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2270 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2271 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2276 static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
2277 struct tb_switch *host, struct tb_switch *dev)
2279 struct tb_port *in, *out;
2280 struct tb_tunnel *dp_tunnel2;
2281 struct tb_path *path;
2283 in = &host->ports[6];
2284 out = &dev->ports[14];
2285 dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
2286 KUNIT_ASSERT_TRUE(test, dp_tunnel2 != NULL);
2287 KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
2289 path = dp_tunnel2->paths[0];
2290 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2291 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2292 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2293 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2294 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2296 path = dp_tunnel2->paths[1];
2297 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2298 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2299 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2300 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2301 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2303 path = dp_tunnel2->paths[2];
2304 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2305 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2306 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2307 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2308 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2313 static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test,
2314 struct tb_switch *host, struct tb_switch *dev)
2316 struct tb_port *up, *down;
2317 struct tb_tunnel *usb3_tunnel;
2318 struct tb_path *path;
2320 down = &host->ports[12];
2321 up = &dev->ports[16];
2322 usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
2323 KUNIT_ASSERT_TRUE(test, usb3_tunnel != NULL);
2324 KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2);
2326 path = usb3_tunnel->paths[0];
2327 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2328 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2329 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2330 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2331 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2333 path = usb3_tunnel->paths[1];
2334 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2335 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2336 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2337 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2338 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2343 static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test,
2344 struct tb_switch *host, struct tb_switch *dev)
2346 struct tb_port *nhi, *port;
2347 struct tb_tunnel *dma_tunnel1;
2348 struct tb_path *path;
2350 nhi = &host->ports[7];
2351 port = &dev->ports[3];
2352 dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2353 KUNIT_ASSERT_TRUE(test, dma_tunnel1 != NULL);
2354 KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2);
2356 path = dma_tunnel1->paths[0];
2357 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2358 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2359 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2360 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2361 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2363 path = dma_tunnel1->paths[1];
2364 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2365 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2366 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2367 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2368 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2373 static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test,
2374 struct tb_switch *host, struct tb_switch *dev)
2376 struct tb_port *nhi, *port;
2377 struct tb_tunnel *dma_tunnel2;
2378 struct tb_path *path;
2380 nhi = &host->ports[7];
2381 port = &dev->ports[3];
2382 dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
2383 KUNIT_ASSERT_TRUE(test, dma_tunnel2 != NULL);
2384 KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2);
2386 path = dma_tunnel2->paths[0];
2387 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2388 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2389 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2390 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2391 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2393 path = dma_tunnel2->paths[1];
2394 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2395 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2396 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2397 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2398 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2403 static void tb_test_credit_alloc_all(struct kunit *test)
2405 struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
2406 struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
2407 struct tb_switch *host, *dev;
2410 * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from host to
2411 * device. Expectation is that all these can be established with
2412 * the default credit allocation found in Intel hardware.
2415 host = alloc_host_usb4(test);
2416 dev = alloc_dev_usb4(test, host, 0x1, true);
2418 pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev);
2419 dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev);
2420 dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev);
2421 usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev);
2422 dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
2423 dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);
2425 tb_tunnel_free(dma_tunnel2);
2426 tb_tunnel_free(dma_tunnel1);
2427 tb_tunnel_free(usb3_tunnel);
2428 tb_tunnel_free(dp_tunnel2);
2429 tb_tunnel_free(dp_tunnel1);
2430 tb_tunnel_free(pcie_tunnel);
2433 static const u32 root_directory[] = {
2434 0x55584401, /* "UXD" v1 */
2435 0x00000018, /* Root directory length */
2436 0x76656e64, /* "vend" */
2437 0x6f726964, /* "orid" */
2438 0x76000001, /* "v" R 1 */
2439 0x00000a27, /* Immediate value, ! Vendor ID */
2440 0x76656e64, /* "vend" */
2441 0x6f726964, /* "orid" */
2442 0x74000003, /* "t" R 3 */
2443 0x0000001a, /* Text leaf offset, (“Apple Inc.”) */
2444 0x64657669, /* "devi" */
2445 0x63656964, /* "ceid" */
2446 0x76000001, /* "v" R 1 */
2447 0x0000000a, /* Immediate value, ! Device ID */
2448 0x64657669, /* "devi" */
2449 0x63656964, /* "ceid" */
2450 0x74000003, /* "t" R 3 */
2451 0x0000001d, /* Text leaf offset, (“Macintosh”) */
2452 0x64657669, /* "devi" */
2453 0x63657276, /* "cerv" */
2454 0x76000001, /* "v" R 1 */
2455 0x80000100, /* Immediate value, Device Revision */
2456 0x6e657477, /* "netw" */
2457 0x6f726b00, /* "ork" */
2458 0x44000014, /* "D" R 20 */
2459 0x00000021, /* Directory data offset, (Network Directory) */
2460 0x4170706c, /* "Appl" */
2461 0x6520496e, /* "e In" */
2462 0x632e0000, /* "c." ! */
2463 0x4d616369, /* "Maci" */
2464 0x6e746f73, /* "ntos" */
2465 0x68000000, /* "h" */
2466 0x00000000, /* padding */
2467 0xca8961c6, /* Directory UUID, Network Directory */
2468 0x9541ce1c, /* Directory UUID, Network Directory */
2469 0x5949b8bd, /* Directory UUID, Network Directory */
2470 0x4f5a5f2e, /* Directory UUID, Network Directory */
2471 0x70727463, /* "prtc" */
2472 0x69640000, /* "id" */
2473 0x76000001, /* "v" R 1 */
2474 0x00000001, /* Immediate value, Network Protocol ID */
2475 0x70727463, /* "prtc" */
2476 0x76657273, /* "vers" */
2477 0x76000001, /* "v" R 1 */
2478 0x00000001, /* Immediate value, Network Protocol Version */
2479 0x70727463, /* "prtc" */
2480 0x72657673, /* "revs" */
2481 0x76000001, /* "v" R 1 */
2482 0x00000001, /* Immediate value, Network Protocol Revision */
2483 0x70727463, /* "prtc" */
2484 0x73746e73, /* "stns" */
2485 0x76000001, /* "v" R 1 */
2486 0x00000000, /* Immediate value, Network Protocol Settings */
/*
 * UUID of the "network" sub-directory. The same UUID appears in
 * root_directory above as the four 0xca8961c6... words — presumably the
 * property block stores the fields byte-swapped (NOTE(review): verify
 * against tb_property_parse_dir()).
 */
static const uuid_t network_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
2493 static void tb_test_property_parse(struct kunit *test)
2495 struct tb_property_dir *dir, *network_dir;
2496 struct tb_property *p;
2498 dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2499 KUNIT_ASSERT_TRUE(test, dir != NULL);
2501 p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
2502 KUNIT_ASSERT_TRUE(test, !p);
2504 p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
2505 KUNIT_ASSERT_TRUE(test, p != NULL);
2506 KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");
2508 p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
2509 KUNIT_ASSERT_TRUE(test, p != NULL);
2510 KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27);
2512 p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
2513 KUNIT_ASSERT_TRUE(test, p != NULL);
2514 KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");
2516 p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
2517 KUNIT_ASSERT_TRUE(test, p != NULL);
2518 KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa);
2520 p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
2521 KUNIT_ASSERT_TRUE(test, !p);
2523 p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
2524 KUNIT_ASSERT_TRUE(test, p != NULL);
2526 network_dir = p->value.dir;
2527 KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));
2529 p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
2530 KUNIT_ASSERT_TRUE(test, p != NULL);
2531 KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2533 p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
2534 KUNIT_ASSERT_TRUE(test, p != NULL);
2535 KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2537 p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
2538 KUNIT_ASSERT_TRUE(test, p != NULL);
2539 KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2541 p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
2542 KUNIT_ASSERT_TRUE(test, p != NULL);
2543 KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0);
2545 p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
2546 KUNIT_EXPECT_TRUE(test, !p);
2547 p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
2548 KUNIT_EXPECT_TRUE(test, !p);
2550 tb_property_free_dir(dir);
2553 static void tb_test_property_format(struct kunit *test)
2555 struct tb_property_dir *dir;
2560 dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2561 KUNIT_ASSERT_TRUE(test, dir != NULL);
2563 ret = tb_property_format_dir(dir, NULL, 0);
2564 KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
2568 block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
2569 KUNIT_ASSERT_TRUE(test, block != NULL);
2571 ret = tb_property_format_dir(dir, block, block_len);
2572 KUNIT_EXPECT_EQ(test, ret, 0);
2574 for (i = 0; i < ARRAY_SIZE(root_directory); i++)
2575 KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
2577 tb_property_free_dir(dir);
2580 static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
2581 struct tb_property_dir *d2)
2583 struct tb_property *p1, *p2, *tmp;
2587 KUNIT_ASSERT_TRUE(test, d2->uuid != NULL);
2588 KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
2590 KUNIT_ASSERT_TRUE(test, d2->uuid == NULL);
2594 tb_property_for_each(d1, tmp)
2596 KUNIT_ASSERT_NE(test, n1, 0);
2599 tb_property_for_each(d2, tmp)
2601 KUNIT_ASSERT_NE(test, n2, 0);
2603 KUNIT_ASSERT_EQ(test, n1, n2);
2607 for (i = 0; i < n1; i++) {
2608 p1 = tb_property_get_next(d1, p1);
2609 KUNIT_ASSERT_TRUE(test, p1 != NULL);
2610 p2 = tb_property_get_next(d2, p2);
2611 KUNIT_ASSERT_TRUE(test, p2 != NULL);
2613 KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
2614 KUNIT_ASSERT_EQ(test, p1->type, p2->type);
2615 KUNIT_ASSERT_EQ(test, p1->length, p2->length);
2618 case TB_PROPERTY_TYPE_DIRECTORY:
2619 KUNIT_ASSERT_TRUE(test, p1->value.dir != NULL);
2620 KUNIT_ASSERT_TRUE(test, p2->value.dir != NULL);
2621 compare_dirs(test, p1->value.dir, p2->value.dir);
2624 case TB_PROPERTY_TYPE_DATA:
2625 KUNIT_ASSERT_TRUE(test, p1->value.data != NULL);
2626 KUNIT_ASSERT_TRUE(test, p2->value.data != NULL);
2627 KUNIT_ASSERT_TRUE(test,
2628 !memcmp(p1->value.data, p2->value.data,
2633 case TB_PROPERTY_TYPE_TEXT:
2634 KUNIT_ASSERT_TRUE(test, p1->value.text != NULL);
2635 KUNIT_ASSERT_TRUE(test, p2->value.text != NULL);
2636 KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
2639 case TB_PROPERTY_TYPE_VALUE:
2640 KUNIT_ASSERT_EQ(test, p1->value.immediate,
2641 p2->value.immediate);
2644 KUNIT_FAIL(test, "unexpected property type");
2650 static void tb_test_property_copy(struct kunit *test)
2652 struct tb_property_dir *src, *dst;
2656 src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2657 KUNIT_ASSERT_TRUE(test, src != NULL);
2659 dst = tb_property_copy_dir(src);
2660 KUNIT_ASSERT_TRUE(test, dst != NULL);
2662 /* Compare the structures */
2663 compare_dirs(test, src, dst);
2665 /* Compare the resulting property block */
2666 ret = tb_property_format_dir(dst, NULL, 0);
2667 KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
2669 block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
2670 KUNIT_ASSERT_TRUE(test, block != NULL);
2672 ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
2673 KUNIT_EXPECT_TRUE(test, !ret);
2675 for (i = 0; i < ARRAY_SIZE(root_directory); i++)
2676 KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
2678 tb_property_free_dir(dst);
2679 tb_property_free_dir(src);
2682 static struct kunit_case tb_test_cases[] = {
2683 KUNIT_CASE(tb_test_path_basic),
2684 KUNIT_CASE(tb_test_path_not_connected_walk),
2685 KUNIT_CASE(tb_test_path_single_hop_walk),
2686 KUNIT_CASE(tb_test_path_daisy_chain_walk),
2687 KUNIT_CASE(tb_test_path_simple_tree_walk),
2688 KUNIT_CASE(tb_test_path_complex_tree_walk),
2689 KUNIT_CASE(tb_test_path_max_length_walk),
2690 KUNIT_CASE(tb_test_path_not_connected),
2691 KUNIT_CASE(tb_test_path_not_bonded_lane0),
2692 KUNIT_CASE(tb_test_path_not_bonded_lane1),
2693 KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
2694 KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
2695 KUNIT_CASE(tb_test_path_mixed_chain),
2696 KUNIT_CASE(tb_test_path_mixed_chain_reverse),
2697 KUNIT_CASE(tb_test_tunnel_pcie),
2698 KUNIT_CASE(tb_test_tunnel_dp),
2699 KUNIT_CASE(tb_test_tunnel_dp_chain),
2700 KUNIT_CASE(tb_test_tunnel_dp_tree),
2701 KUNIT_CASE(tb_test_tunnel_dp_max_length),
2702 KUNIT_CASE(tb_test_tunnel_port_on_path),
2703 KUNIT_CASE(tb_test_tunnel_usb3),
2704 KUNIT_CASE(tb_test_tunnel_dma),
2705 KUNIT_CASE(tb_test_tunnel_dma_rx),
2706 KUNIT_CASE(tb_test_tunnel_dma_tx),
2707 KUNIT_CASE(tb_test_tunnel_dma_chain),
2708 KUNIT_CASE(tb_test_tunnel_dma_match),
2709 KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
2710 KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
2711 KUNIT_CASE(tb_test_credit_alloc_pcie),
2712 KUNIT_CASE(tb_test_credit_alloc_dp),
2713 KUNIT_CASE(tb_test_credit_alloc_usb3),
2714 KUNIT_CASE(tb_test_credit_alloc_dma),
2715 KUNIT_CASE(tb_test_credit_alloc_dma_multiple),
2716 KUNIT_CASE(tb_test_credit_alloc_all),
2717 KUNIT_CASE(tb_test_property_parse),
2718 KUNIT_CASE(tb_test_property_format),
2719 KUNIT_CASE(tb_test_property_copy),
2723 static struct kunit_suite tb_test_suite = {
2724 .name = "thunderbolt",
2725 .test_cases = tb_test_cases,
2728 static struct kunit_suite *tb_test_suites[] = { &tb_test_suite, NULL };
2730 int tb_test_init(void)
2732 return __kunit_test_suites_init(tb_test_suites);
2735 void tb_test_exit(void)
2737 return __kunit_test_suites_exit(tb_test_suites);