/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <[email protected]>
 * Copyright (C) 2018, Intel Corporation
 */

#ifndef TB_H_
#define TB_H_

#include <linux/nvmem-provider.h>
#include <linux/pci.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>

#include "tb_regs.h"
#include "ctl.h"
#include "dma_port.h"

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K

/* Intel specific NVM offsets */
#define NVM_DEVID		0x05
#define NVM_VERSION		0x08
#define NVM_FLASH_SIZE		0x45

/**
 * struct tb_nvm - Structure holding NVM information
 * @dev: Owner of the NVM
 * @major: Major version number of the active NVM portion
 * @minor: Minor version number of the active NVM portion
 * @id: Identifier used with both NVM portions
 * @active: Active portion NVMem device
 * @non_active: Non-active portion NVMem device
 * @buf: Buffer where the NVM image is stored before it is written to
 *	 the actual NVM flash device
 * @buf_data_size: Number of bytes actually consumed by the new NVM
 *		   image
 * @authenticating: The device is authenticating the new NVM
 *
 * The user of this structure needs to handle serialization of possible
 * concurrent access.
 */
struct tb_nvm {
	struct device *dev;
	u8 major;
	u8 minor;
	int id;
	struct nvmem_device *active;
	struct nvmem_device *non_active;
	void *buf;
	size_t buf_data_size;
	bool authenticating;
};

#define TB_SWITCH_KEY_SIZE		32
#define TB_SWITCH_MAX_DEPTH		6
#define USB4_SWITCH_MAX_DEPTH		5

/**
 * enum tb_switch_tmu_rate - TMU refresh rate
 * @TB_SWITCH_TMU_RATE_OFF: %0 (Disable Time Sync handshake)
 * @TB_SWITCH_TMU_RATE_HIFI: %16 us time interval between successive
 *			     transmission of the Delay Request TSNOS
 *			     (Time Sync Notification Ordered Set) on a Link
 * @TB_SWITCH_TMU_RATE_NORMAL: %1 ms time interval between successive
 *			       transmission of the Delay Request TSNOS on
 *			       a Link
 */
enum tb_switch_tmu_rate {
	TB_SWITCH_TMU_RATE_OFF = 0,
	TB_SWITCH_TMU_RATE_HIFI = 16,
	TB_SWITCH_TMU_RATE_NORMAL = 1000,
};

/**
 * struct tb_switch_tmu - Structure holding switch TMU configuration
 * @cap: Offset to the TMU capability (%0 if not found)
 * @has_ucap: Does the switch support uni-directional mode
 * @rate: TMU refresh rate related to upstream switch. In case of root
 *	  switch this holds the domain rate.
 * @unidirectional: Is the TMU in uni-directional or bi-directional mode
 *		    related to upstream switch. Don't care for root switch.
 */
struct tb_switch_tmu {
	int cap;
	bool has_ucap;
	enum tb_switch_tmu_rate rate;
	bool unidirectional;
};

/**
 * struct tb_switch - a thunderbolt switch
 * @dev: Device for the switch
 * @config: Switch configuration
 * @ports: Ports in this switch
 * @dma_port: If the switch has a port supporting DMA configuration based
 *	      mailbox this will hold the pointer to that (%NULL
 *	      otherwise). If set it also means the switch has
 *	      upgradeable NVM.
 * @tmu: The switch TMU configuration
 * @tb: Pointer to the domain the switch belongs to
 * @uid: Unique ID of the switch
 * @uuid: UUID of the switch (or %NULL if not supported)
 * @vendor: Vendor ID of the switch
 * @device: Device ID of the switch
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @link_speed: Speed of the link in Gb/s
 * @link_width: Width of the link (1 or 2)
 * @link_usb4: Upstream link is USB4
 * @generation: Switch Thunderbolt generation
 * @cap_plug_events: Offset to the plug events capability (%0 if not found)
 * @cap_lc: Offset to the link controller capability (%0 if not found)
 * @is_unplugged: The switch is going away
 * @drom: DROM of the switch (%NULL if not found)
 * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
 * @no_nvm_upgrade: Prevent NVM upgrade of this switch
 * @safe_mode: The switch is in safe-mode
 * @boot: Whether the switch was already authorized on boot or not
 * @rpm: The switch supports runtime PM
 * @authorized: Whether the switch is authorized by user or policy
 * @security_level: Switch supported security level
 * @key: Contains the key used to challenge the device or %NULL if not
 *	 supported. Size of the key is %TB_SWITCH_KEY_SIZE.
 * @connection_id: Connection ID used with ICM messaging
 * @connection_key: Connection key used with ICM messaging
 * @link: Root switch link this switch is connected to (ICM only)
 * @depth: Depth in the chain this switch is connected at (ICM only)
 * @rpm_complete: Completion used to wait for runtime resume to
 *		  complete (ICM only)
 *
 * When the switch is being added to or removed from the domain (other
 * switches) you need to have the domain lock held.
 */
struct tb_switch {
	struct device dev;
	struct tb_regs_switch_header config;
	struct tb_port *ports;
	struct tb_dma_port *dma_port;
	struct tb_switch_tmu tmu;
	struct tb *tb;
	u64 uid;
	uuid_t *uuid;
	u16 vendor;
	u16 device;
	const char *vendor_name;
	const char *device_name;
	unsigned int link_speed;
	unsigned int link_width;
	bool link_usb4;
	unsigned int generation;
	int cap_plug_events;
	int cap_lc;
	bool is_unplugged;
	u8 *drom;
	struct tb_nvm *nvm;
	bool no_nvm_upgrade;
	bool safe_mode;
	bool boot;
	bool rpm;
	unsigned int authorized;
	enum tb_security_level security_level;
	u8 *key;
	u8 connection_id;
	u8 connection_key;
	u8 link;
	u8 depth;
	struct completion rpm_complete;
};

/**
 * struct tb_port - a thunderbolt port, part of a tb_switch
 * @config: Cached port configuration read from registers
 * @sw: Switch the port belongs to
 * @remote: Remote port (%NULL if not connected)
 * @xdomain: Remote host (%NULL if not connected)
 * @cap_phy: Offset to the lane adapter (PHY) capability (%0 if not found)
 * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present)
 * @cap_adap: Offset of the adapter specific capability (%0 if not present)
 * @cap_usb4: Offset to the USB4 port capability (%0 if not present)
 * @port: Port number on switch
 * @disabled: Disabled by eeprom
 * @bonded: true if the port is bonded (two lanes combined as one)
 * @dual_link_port: If the switch is connected using two ports, points
 *		    to the other port.
 * @link_nr: Is this primary or secondary port on the dual_link.
 * @in_hopids: Currently allocated input HopIDs
 * @out_hopids: Currently allocated output HopIDs
 * @list: Used to link ports to DP resources list
 */
struct tb_port {
	struct tb_regs_port_header config;
	struct tb_switch *sw;
	struct tb_port *remote;
	struct tb_xdomain *xdomain;
	int cap_phy;
	int cap_tmu;
	int cap_adap;
	int cap_usb4;
	u8 port;
	bool disabled;
	bool bonded;
	struct tb_port *dual_link_port;
	u8 link_nr:1;
	struct ida in_hopids;
	struct ida out_hopids;
	struct list_head list;
};

/**
 * struct tb_retimer - Thunderbolt retimer
 * @dev: Device for the retimer
 * @tb: Pointer to the domain the retimer belongs to
 * @index: Retimer index facing the router USB4 port
 * @vendor: Vendor ID of the retimer
 * @device: Device ID of the retimer
 * @port: Pointer to the lane 0 adapter
 * @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise)
 * @auth_status: Status of last NVM authentication
 */
struct tb_retimer {
	struct device dev;
	struct tb *tb;
	u8 index;
	u32 vendor;
	u32 device;
	struct tb_port *port;
	struct tb_nvm *nvm;
	u32 auth_status;
};

/**
 * struct tb_path_hop - routing information for a tb_path
 * @in_port: Ingress port of a switch
 * @out_port: Egress port of a switch where the packet is routed out
 *	      (must be on the same switch as @in_port)
 * @in_hop_index: HopID where the path configuration entry is placed in
 *		  the path config space of @in_port.
 * @in_counter_index: Used counter index (not used in the driver
 *		      currently, %-1 to disable)
 * @next_hop_index: HopID of the packet when it is routed out from @out_port
 * @initial_credits: Number of initial flow control credits allocated for
 *		     the path
 *
 * Hop configuration is always done on the IN port of a switch.
 * in_port and out_port have to be on the same switch. Packets arriving on
 * in_port with "hop" = in_hop_index will get routed out through out_port.
 * The next hop to take (on out_port->remote) is determined by
 * next_hop_index. When routing a packet to another switch (out->remote is
 * set) the @next_hop_index must match the @in_hop_index of that next
 * hop to make routing possible.
 *
 * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
 * port.
 */
struct tb_path_hop {
	struct tb_port *in_port;
	struct tb_port *out_port;
	int in_hop_index;
	int in_counter_index;
	int next_hop_index;
	unsigned int initial_credits;
};

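/*
 * Illustrative sketch, not part of the original header: consecutive hops are
 * chained by making the @next_hop_index programmed on one switch equal to the
 * @in_hop_index looked up on the next switch's ingress port. The helper name
 * is hypothetical.
 */
static inline void tb_path_hop_chain(struct tb_path_hop *hop,
				     struct tb_path_hop *next, int hopid)
{
	/* HopID the packet carries when it leaves hop->out_port... */
	hop->next_hop_index = hopid;
	/* ...must be the HopID the next switch matches on its ingress port */
	next->in_hop_index = hopid;
}
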
/**
 * enum tb_path_port - path options mask
 * @TB_PATH_NONE: Do not activate on any hop on path
 * @TB_PATH_SOURCE: Activate on the first hop (out of src)
 * @TB_PATH_INTERNAL: Activate on the intermediate hops (not the first/last)
 * @TB_PATH_DESTINATION: Activate on the last hop (into dst)
 * @TB_PATH_ALL: Activate on all hops on the path
 */
enum tb_path_port {
	TB_PATH_NONE = 0,
	TB_PATH_SOURCE = 1,
	TB_PATH_INTERNAL = 2,
	TB_PATH_DESTINATION = 4,
	TB_PATH_ALL = 7,
};

/**
 * struct tb_path - a unidirectional path between two ports
 * @tb: Pointer to the domain structure
 * @name: Name of the path (used for debugging)
 * @nfc_credits: Number of non flow controlled credits allocated for the path
 * @ingress_shared_buffer: Shared buffering used for ingress ports on the path
 * @egress_shared_buffer: Shared buffering used for egress ports on the path
 * @ingress_fc_enable: Flow control for ingress ports on the path
 * @egress_fc_enable: Flow control for egress ports on the path
 * @priority: Priority group of the path
 * @weight: Weight of the path inside the priority group
 * @drop_packages: Drop packages from queue tail or head
 * @activated: Is the path active
 * @clear_fc: Clear all flow control from the path config space entries
 *	      when deactivating this path
 * @hops: Path hops
 * @path_length: How many hops the path uses
 *
 * A path consists of a number of hops (see &struct tb_path_hop). To
 * establish a PCIe tunnel two paths have to be created between the two
 * PCIe ports.
 */
struct tb_path {
	struct tb *tb;
	const char *name;
	int nfc_credits;
	enum tb_path_port ingress_shared_buffer;
	enum tb_path_port egress_shared_buffer;
	enum tb_path_port ingress_fc_enable;
	enum tb_path_port egress_fc_enable;

	unsigned int priority:3;
	int weight:4;
	bool drop_packages;
	bool activated;
	bool clear_fc;
	struct tb_path_hop *hops;
	int path_length;
};

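/*
 * Illustrative sketch, not part of the original header: the tb_path_port
 * values act as a mask in the fields above, e.g. turning on flow control on
 * every hop of a path. The helper name is hypothetical.
 */
static inline void tb_path_enable_fc_everywhere(struct tb_path *path)
{
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_fc_enable = TB_PATH_ALL;
}
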
/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
#define TB_PATH_MIN_HOPID	8
/*
 * Support paths from the farthest (depth 6) router to the host and back
 * to the same level (not necessarily to the same router).
 */
#define TB_PATH_MAX_HOPS	(7 * 2)

/**
 * struct tb_cm_ops - Connection manager specific operations vector
 * @driver_ready: Called right after control channel is started. Used by
 *		  ICM to send driver ready message to the firmware.
 * @start: Starts the domain
 * @stop: Stops the domain
 * @suspend_noirq: Connection manager specific suspend_noirq
 * @resume_noirq: Connection manager specific resume_noirq
 * @suspend: Connection manager specific suspend
 * @complete: Connection manager specific complete
 * @runtime_suspend: Connection manager specific runtime_suspend
 * @runtime_resume: Connection manager specific runtime_resume
 * @runtime_suspend_switch: Runtime suspend a switch
 * @runtime_resume_switch: Runtime resume a switch
 * @handle_event: Handle thunderbolt event
 * @get_boot_acl: Get boot ACL list
 * @set_boot_acl: Set boot ACL list
 * @approve_switch: Approve switch
 * @add_switch_key: Add key to switch
 * @challenge_switch_key: Challenge switch using key
 * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
 * @approve_xdomain_paths: Approve (establish) XDomain DMA paths
 * @disconnect_xdomain_paths: Disconnect XDomain DMA paths
 */
struct tb_cm_ops {
	int (*driver_ready)(struct tb *tb);
	int (*start)(struct tb *tb);
	void (*stop)(struct tb *tb);
	int (*suspend_noirq)(struct tb *tb);
	int (*resume_noirq)(struct tb *tb);
	int (*suspend)(struct tb *tb);
	void (*complete)(struct tb *tb);
	int (*runtime_suspend)(struct tb *tb);
	int (*runtime_resume)(struct tb *tb);
	int (*runtime_suspend_switch)(struct tb_switch *sw);
	int (*runtime_resume_switch)(struct tb_switch *sw);
	void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
			     const void *buf, size_t size);
	int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids);
	int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids);
	int (*approve_switch)(struct tb *tb, struct tb_switch *sw);
	int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
	int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
				    const u8 *challenge, u8 *response);
	int (*disconnect_pcie_paths)(struct tb *tb);
	int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
	int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
};

static inline void *tb_priv(struct tb *tb)
{
	return (void *)tb->privdata;
}

#define TB_AUTOSUSPEND_DELAY		15000 /* ms */

/* helper functions & macros */

/**
 * tb_upstream_port() - return the upstream port of a switch
 *
 * Every switch has an upstream port (for the root switch it is the NHI).
 *
 * During switch alloc/init tb_upstream_port()->remote may be NULL, even for
 * non root switches (on the NHI port remote is always NULL).
 *
 * Return: Returns the upstream port of the switch.
 */
static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
{
	return &sw->ports[sw->config.upstream_port_number];
}

/**
 * tb_is_upstream_port() - Is the port upstream facing
 * @port: Port to check
 *
 * Returns true if @port is an upstream facing port. In case of dual link
 * ports both return true.
 */
static inline bool tb_is_upstream_port(const struct tb_port *port)
{
	const struct tb_port *upstream_port = tb_upstream_port(port->sw);
	return port == upstream_port || port->dual_link_port == upstream_port;
}

static inline u64 tb_route(const struct tb_switch *sw)
{
	return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
}

static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
{
	u8 port;

	port = route >> (sw->config.depth * 8);
	if (WARN_ON(port > sw->config.max_port_number))
		return NULL;
	return &sw->ports[port];
}

/**
 * tb_port_has_remote() - Does the port have switch connected downstream
 * @port: Port to check
 *
 * Returns true only when the port is the primary port and has a remote set.
 */
static inline bool tb_port_has_remote(const struct tb_port *port)
{
	if (tb_is_upstream_port(port))
		return false;
	if (!port->remote)
		return false;
	if (port->dual_link_port && port->link_nr)
		return false;

	return true;
}

static inline bool tb_port_is_null(const struct tb_port *port)
{
	return port && port->port && port->config.type == TB_TYPE_PORT;
}

static inline bool tb_port_is_pcie_down(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_PCIE_DOWN;
}

static inline bool tb_port_is_pcie_up(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_PCIE_UP;
}

static inline bool tb_port_is_dpin(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_DP_HDMI_IN;
}

static inline bool tb_port_is_dpout(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_DP_HDMI_OUT;
}

static inline bool tb_port_is_usb3_down(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_USB3_DOWN;
}

static inline bool tb_port_is_usb3_up(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_USB3_UP;
}

static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
			     enum tb_cfg_space space, u32 offset, u32 length)
{
	if (sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_read(sw->tb->ctl,
			   buffer,
			   tb_route(sw),
			   0,
			   space,
			   offset,
			   length);
}

static inline int tb_sw_write(struct tb_switch *sw, const void *buffer,
			      enum tb_cfg_space space, u32 offset, u32 length)
{
	if (sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_write(sw->tb->ctl,
			    buffer,
			    tb_route(sw),
			    0,
			    space,
			    offset,
			    length);
}

static inline int tb_port_read(struct tb_port *port, void *buffer,
			       enum tb_cfg_space space, u32 offset, u32 length)
{
	if (port->sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_read(port->sw->tb->ctl,
			   buffer,
			   tb_route(port->sw),
			   port->port,
			   space,
			   offset,
			   length);
}

static inline int tb_port_write(struct tb_port *port, const void *buffer,
				enum tb_cfg_space space, u32 offset, u32 length)
{
	if (port->sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_write(port->sw->tb->ctl,
			    buffer,
			    tb_route(port->sw),
			    port->port,
			    space,
			    offset,
			    length);
}

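/*
 * Illustrative sketch, not part of the original header: a typical caller
 * reads a single dword from an adapter config space with tb_port_read().
 * The helper name and the zero offset are placeholder values.
 */
static inline int tb_port_read_first_dword(struct tb_port *port, u32 *val)
{
	return tb_port_read(port, val, TB_CFG_PORT, 0, 1);
}
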
#define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_dbg(tb, fmt, arg...) dev_dbg(&(tb)->nhi->pdev->dev, fmt, ## arg)

#define __TB_SW_PRINT(level, sw, fmt, arg...)			\
	do {							\
		const struct tb_switch *__sw = (sw);		\
		level(__sw->tb, "%llx: " fmt,			\
		      tb_route(__sw), ## arg);			\
	} while (0)
#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
#define tb_sw_dbg(sw, fmt, arg...) __TB_SW_PRINT(tb_dbg, sw, fmt, ##arg)

#define __TB_PORT_PRINT(level, _port, fmt, arg...)			\
	do {								\
		const struct tb_port *__port = (_port);			\
		level(__port->sw->tb, "%llx:%x: " fmt,			\
		      tb_route(__port->sw), __port->port, ## arg);	\
	} while (0)
#define tb_port_WARN(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_WARN, port, fmt, ##arg)
#define tb_port_warn(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
#define tb_port_info(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_info, port, fmt, ##arg)
#define tb_port_dbg(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_dbg, port, fmt, ##arg)

struct tb *icm_probe(struct tb_nhi *nhi);
struct tb *tb_probe(struct tb_nhi *nhi);

extern struct device_type tb_domain_type;
extern struct device_type tb_retimer_type;
extern struct device_type tb_switch_type;

int tb_domain_init(void);
void tb_domain_exit(void);
int tb_xdomain_init(void);
void tb_xdomain_exit(void);

struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
int tb_domain_add(struct tb *tb);
void tb_domain_remove(struct tb *tb);
int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);
int tb_domain_suspend(struct tb *tb);
void tb_domain_complete(struct tb *tb);
int tb_domain_runtime_suspend(struct tb *tb);
int tb_domain_runtime_resume(struct tb *tb);
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_disconnect_pcie_paths(struct tb *tb);
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
int tb_domain_disconnect_all_paths(struct tb *tb);

static inline struct tb *tb_domain_get(struct tb *tb)
{
	if (tb)
		get_device(&tb->dev);
	return tb;
}

static inline void tb_domain_put(struct tb *tb)
{
	put_device(&tb->dev);
}

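/*
 * Illustrative sketch, not part of the original header: a connection manager
 * typically asks tb_domain_alloc() to reserve room for its private state and
 * later retrieves it with tb_priv(). "struct example_cm" and the helper are
 * hypothetical names used only for this example.
 */
struct example_cm {
	bool hotplug_active;
};

static inline struct tb *example_cm_domain_alloc(struct tb_nhi *nhi)
{
	struct tb *tb = tb_domain_alloc(nhi, sizeof(struct example_cm));

	if (tb) {
		struct example_cm *ecm = tb_priv(tb);

		/* Initialize private state before registering with tb_domain_add() */
		ecm->hotplug_active = false;
	}
	return tb;
}
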
struct tb_nvm *tb_nvm_alloc(struct device *dev);
int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read);
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
		     size_t bytes);
int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
			  nvmem_reg_write_t reg_write);
void tb_nvm_free(struct tb_nvm *nvm);
void tb_nvm_exit(void);

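/*
 * Illustrative sketch, not part of the original header: a non-active portion
 * write callback normally just buffers the bytes with tb_nvm_write_buf(); the
 * buffered image is written to the hardware later when the new NVM is
 * authenticated. Passing the tb_nvm pointer as the callback priv is an
 * assumption made only for this example.
 */
static inline int tb_nvm_non_active_write_example(void *priv, unsigned int offset,
						  void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;

	return tb_nvm_write_buf(nvm, offset, val, bytes);
}
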
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route);
struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
					    struct device *parent, u64 route);
int tb_switch_configure(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb *tb, u64 route);
void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type);
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
					       u8 depth);
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route);

/**
 * tb_switch_for_each_port() - Iterate over each switch port
 * @sw: Switch whose ports to iterate
 * @p: Port used as iterator
 *
 * Iterates over each switch port skipping the control port (port %0).
 */
#define tb_switch_for_each_port(sw, p)					\
	for ((p) = &(sw)->ports[1];					\
	     (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++)

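/*
 * Illustrative sketch, not part of the original header: counting the ports of
 * a switch with tb_switch_for_each_port(). The helper name is hypothetical.
 */
static inline int tb_switch_port_count(struct tb_switch *sw)
{
	struct tb_port *port;
	int count = 0;

	tb_switch_for_each_port(sw, port)
		count++;

	return count;
}
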
static inline struct tb_switch *tb_switch_get(struct tb_switch *sw)
{
	if (sw)
		get_device(&sw->dev);
	return sw;
}

static inline void tb_switch_put(struct tb_switch *sw)
{
	put_device(&sw->dev);
}

static inline bool tb_is_switch(const struct device *dev)
{
	return dev->type == &tb_switch_type;
}

static inline struct tb_switch *tb_to_switch(struct device *dev)
{
	if (tb_is_switch(dev))
		return container_of(dev, struct tb_switch, dev);
	return NULL;
}

static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
{
	return tb_to_switch(sw->dev.parent);
}

static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
{
	return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
}

static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw)
{
	return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
}

static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
		return true;
	default:
		return false;
	}
}

static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return true;
	default:
		return false;
	}
}

static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
		return true;
	default:
		return false;
	}
}

static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
		return true;
	default:
		return false;
	}
}

/**
 * tb_switch_is_usb4() - Is the switch USB4 compliant
 * @sw: Switch to check
 *
 * Returns true if @sw is a USB4 compliant router, false otherwise.
 */
static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
{
	return sw->config.thunderbolt_version == USB4_VERSION_1_0;
}

/**
 * tb_switch_is_icm() - Is the switch handled by ICM firmware
 * @sw: Switch to check
 *
 * In case there is a need to differentiate whether ICM firmware or SW CM
 * is handling @sw this function can be called. It is valid to call this
 * after tb_switch_alloc() and tb_switch_configure() have been called
 * (the latter only for the SW CM case).
 */
static inline bool tb_switch_is_icm(const struct tb_switch *sw)
{
	return !sw->config.enabled;
}

int tb_switch_lane_bonding_enable(struct tb_switch *sw);
void tb_switch_lane_bonding_disable(struct tb_switch *sw);

bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);

int tb_switch_tmu_init(struct tb_switch *sw);
int tb_switch_tmu_post_time(struct tb_switch *sw);
int tb_switch_tmu_disable(struct tb_switch *sw);
int tb_switch_tmu_enable(struct tb_switch *sw);

static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
{
	return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI &&
	       !sw->tmu.unidirectional;
}

int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
int tb_port_unlock(struct tb_port *port);
int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
void tb_port_release_in_hopid(struct tb_port *port, int hopid);
int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
void tb_port_release_out_hopid(struct tb_port *port, int hopid);
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev);

/**
 * tb_for_each_port_on_path() - Iterate over each port on path
 * @src: Source port
 * @dst: Destination port
 * @p: Port used as iterator
 *
 * Walks over each port on path from @src to @dst.
 */
#define tb_for_each_port_on_path(src, dst, p)				\
	for ((p) = tb_next_port_on_path((src), (dst), NULL); (p);	\
	     (p) = tb_next_port_on_path((src), (dst), (p)))

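/*
 * Illustrative sketch, not part of the original header: walking a path with
 * tb_for_each_port_on_path() to count how many ports it traverses. The
 * helper name is hypothetical.
 */
static inline int tb_path_port_count(struct tb_port *src, struct tb_port *dst)
{
	struct tb_port *port;
	int count = 0;

	tb_for_each_port_on_path(src, dst, port)
		count++;

	return count;
}
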
int tb_port_get_link_speed(struct tb_port *port);

int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
bool tb_port_is_enabled(struct tb_port *port);

bool tb_usb3_port_is_enabled(struct tb_port *port);
int tb_usb3_port_enable(struct tb_port *port, bool enable);

bool tb_pci_port_is_enabled(struct tb_port *port);
int tb_pci_port_enable(struct tb_port *port, bool enable);

int tb_dp_port_hpd_is_active(struct tb_port *port);
int tb_dp_port_hpd_clear(struct tb_port *port);
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx);
bool tb_dp_port_is_enabled(struct tb_port *port);
int tb_dp_port_enable(struct tb_port *port, bool enable);

struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
				 struct tb_port *dst, int dst_hopid,
				 struct tb_port **last, const char *name);
struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
			      struct tb_port *dst, int dst_hopid, int link_nr,
			      const char *name);
void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
bool tb_path_is_invalid(struct tb_path *path);
bool tb_path_port_on_path(const struct tb_path *path,
			  const struct tb_port *port);

int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);

int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
int tb_lc_configure_link(struct tb_switch *sw);
void tb_lc_unconfigure_link(struct tb_switch *sw);
int tb_lc_set_sleep(struct tb_switch *sw);
bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in);
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in);
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in);

static inline int tb_route_length(u64 route)
{
	return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
}

/**
 * tb_downstream_route() - get route to downstream switch
 *
 * Port must not be the upstream port (otherwise a loop is created).
 *
 * Return: Returns a route to the switch behind @port.
 */
static inline u64 tb_downstream_route(struct tb_port *port)
{
	return tb_route(port->sw)
	       | ((u64) port->port << (port->sw->config.depth * 8));
}

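/*
 * Illustrative sketch, not part of the original header: tb_downstream_route()
 * and tb_port_at() are inverses for a downstream facing port, which is handy
 * when converting between route strings and port pointers. The helper name is
 * hypothetical.
 */
static inline bool tb_downstream_route_matches(struct tb_port *port)
{
	u64 route = tb_downstream_route(port);

	return tb_port_at(route, port->sw) == port;
}
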
bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size);
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid);
void tb_xdomain_add(struct tb_xdomain *xd);
void tb_xdomain_remove(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth);

int tb_retimer_scan(struct tb_port *port);
void tb_retimer_remove_all(struct tb_port *port);

static inline bool tb_is_retimer(const struct device *dev)
{
	return dev->type == &tb_retimer_type;
}

static inline struct tb_retimer *tb_to_retimer(struct device *dev)
{
	if (tb_is_retimer(dev))
		return container_of(dev, struct tb_retimer, dev);
	return NULL;
}

int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
			  size_t size);
int usb4_switch_configure_link(struct tb_switch *sw);
void usb4_switch_unconfigure_link(struct tb_switch *sw);
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
int usb4_switch_set_sleep(struct tb_switch *sw);
int usb4_switch_nvm_sector_size(struct tb_switch *sw);
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size);
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size);
int usb4_switch_nvm_authenticate(struct tb_switch *sw);
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
					  const struct tb_port *port);
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
					  const struct tb_port *port);

int usb4_port_unlock(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);

int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
			   u8 size);
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
			    const void *buf, u8 size);
int usb4_port_retimer_is_last(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index,
				unsigned int address, const void *buf,
				size_t size);
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status);
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
			       unsigned int address, void *buf, size_t size);

int usb4_usb3_port_max_link_rate(struct tb_port *port);
int usb4_usb3_port_actual_link_rate(struct tb_port *port);
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw);
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw);
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw);
#endif