// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <[email protected]>
 *          Mika Westerberg <[email protected]>
 */

#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_DEFAULT_TIMEOUT			5000 /* ms */
#define XDOMAIN_PROPERTIES_RETRIES		60
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES	10

struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};

/* Serializes access to the properties and protocol handlers below */
static DEFINE_MUTEX(xdomain_lock);

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 *xdomain_property_block;
static u32 xdomain_property_block_len;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure the route matches */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}

static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}

/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);

static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type,
	void *response, size_t response_size,
	enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
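/*
 * Illustrative sketch (not part of this file): a service driver could
 * exchange a custom message over the control channel like this. The
 * my_req/my_res structures and MY_TIMEOUT_MS are hypothetical; only
 * tb_xdomain_request() and the PDF types come from this API.
 *
 *	struct my_req req = { ... };
 *	struct my_res res;
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
 *				 TB_CFG_PKG_XDOMAIN_RESP, MY_TIMEOUT_MS);
 *	if (ret)
 *		return ret;
 */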

static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
	u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}
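/*
 * Worked example of the encoding above, assuming the xd_hdr layout of
 * route_hi, route_lo and length_sn referenced elsewhere in this file
 * (i.e. 12 bytes): a 24-byte packet gets a length field of
 * (24 - 12) / 4 == 3 dwords, so the length counts everything after
 * the basic XDomain header, and the sequence number is OR'd into the
 * TB_XDOMAIN_SN_MASK bits on top of it.
 */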

static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
{
	const struct tb_xdp_error_response *error;

	if (hdr->type != ERROR_RESPONSE)
		return 0;

	error = (const struct tb_xdp_error_response *)hdr;

	switch (error->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		return -ENOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	len = 0;
	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->hdr);
		if (ret)
			goto err;

		/*
		 * Package length includes the whole payload without the
		 * XDomain header. Validate first that the package is at
		 * least the size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * The first time around, allocate a block that has
		 * enough space for the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	u64 route, u8 sequence, const uuid_t *src_uuid,
	const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol supports forwarding, for which we might add support
	 * later on.
	 */
	if (!uuid_equal(src_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xdomain_lock);

	if (req->offset >= xdomain_property_block_len) {
		mutex_unlock(&xdomain_lock);
		return -EINVAL;
	}

	len = xdomain_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xdomain_lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xdomain_property_block_gen;
	res->data_length = xdomain_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, src_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xdomain_property_block[req->offset], len * 4);

	mutex_unlock(&xdomain_lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.hdr);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called the service driver needs to
 * be able to handle calls to the callback whenever a packet with the
 * registered protocol is received.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
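/*
 * Illustrative sketch (hypothetical driver code, not from this file):
 * registering a handler for a vendor protocol. my_proto_uuid and
 * my_callback are assumptions; the return convention (> 0 means the
 * packet was consumed) follows tb_xdomain_handle_request() below.
 *
 *	static int my_callback(const void *buf, size_t size, void *data)
 *	{
 *		// Optionally answer using tb_xdomain_response()
 *		return 1;
 *	}
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_proto_uuid,
 *		.callback = my_callback,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_handler);
 */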

/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);

static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
			(const struct tb_xdp_properties *)pkg);
		break;

	case PROPERTIES_CHANGED_REQUEST: {
		const struct tb_xdp_properties_changed *xchg =
			(const struct tb_xdp_properties_changed *)pkg;
		struct tb_xdomain *xd;

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in the services it offers.
		 */
		xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
		if (xd) {
			queue_delayed_work(tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(50));
			tb_xdomain_put(xd);
		}

		break;
	}

	default:
		break;
	}

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);

	tb_domain_put(tb);
}

static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return false;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	if (!xw->pkg) {
		kfree(xw);
		return false;
	}
	xw->tb = tb_domain_get(tb);

	schedule_work(&xw->work);
	return true;
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers the new service driver from @drv to the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);

/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null terminated but anything else is pretty much
	 * allowed.
	 */
	return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}
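/*
 * For example (hypothetical values), a service with key "network" and
 * prtcid == prtcvers == prtcrevs == 1 yields the modalias
 * "tbsvc:knetworkp00000001v00000001r00000001", which is what service
 * drivers match against in their module device tables.
 */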

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except new line and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return sprintf(buf, "%s\n", buf);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};

static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	ida_simple_remove(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}

struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);

static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}

static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;
	int id;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
		if (id < 0) {
			kfree(svc);
			break;
		}
		svc->id = id;
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}

static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}

/* Called with @xd->lock held */
static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
{
	if (!xd->resume)
		return;

	xd->resume = false;
	if (xd->transmit_path) {
		dev_dbg(&xd->dev, "re-establishing DMA path\n");
		tb_domain_approve_xdomain_paths(xd->tb, xd);
	}
}

static void tb_xdomain_get_properties(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_properties_work.work);
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->properties_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->properties_retries-- > 0) {
			queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(1000));
		} else {
			/* Give up now */
			dev_err(&xd->dev,
				"failed to read XDomain properties from %pUb\n",
				xd->remote_uuid);
		}
		return;
	}

	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->properties && gen <= xd->property_block_gen) {
		/*
		 * On resume it is likely that the properties block is
		 * not changed (unless the other end added or removed
		 * services). However, we need to make sure the existing
		 * DMA paths are restored properly.
		 */
		tb_xdomain_restore_paths(xd);
		goto err_free_block;
	}

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->properties) {
		tb_property_free_dir(xd->properties);
		update = true;
	}

	xd->properties = dir;
	xd->property_block_gen = gen;

	tb_xdomain_restore_paths(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify userspace that it has changed.
	 */
	if (!update) {
		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return;
		}
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);
	return;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);
}

static void tb_xdomain_properties_changed(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     properties_changed_work.work);
	int ret;

	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
				xd->properties_changed_retries, xd->local_uuid);
	if (ret) {
		if (xd->properties_changed_retries-- > 0)
			queue_delayed_work(xd->tb->wq,
					   &xd->properties_changed_work,
					   msecs_to_jiffies(1000));
		return;
	}

	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(device_name);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *xdomain_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	NULL,
};

static struct attribute_group xdomain_attr_group = {
	.attrs = xdomain_attrs,
};

static const struct attribute_group *xdomain_attr_groups[] = {
	&xdomain_attr_group,
	NULL,
};

static void tb_xdomain_release(struct device *dev)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	put_device(xd->dev.parent);

	tb_property_free_dir(xd->properties);
	ida_destroy(&xd->service_ids);

	kfree(xd->local_uuid);
	kfree(xd->remote_uuid);
	kfree(xd->device_name);
	kfree(xd->vendor_name);
	kfree(xd);
}

static void start_handshake(struct tb_xdomain *xd)
{
	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;

	/* Start exchanging properties with the other host */
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(100));
	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
			   msecs_to_jiffies(1000));
}

static void stop_handshake(struct tb_xdomain *xd)
{
	xd->properties_retries = 0;
	xd->properties_changed_retries = 0;

	cancel_delayed_work_sync(&xd->get_properties_work);
	cancel_delayed_work_sync(&xd->properties_changed_work);
}

static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
	stop_handshake(tb_to_xdomain(dev));
	return 0;
}

static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
	struct tb_xdomain *xd = tb_to_xdomain(dev);

	/*
	 * Ask tb_xdomain_get_properties() to restore any existing DMA
	 * paths after the properties are re-read.
	 */
	xd->resume = true;
	start_handshake(xd);

	return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

struct device_type tb_xdomain_type = {
	.name = "thunderbolt_xdomain",
	.release = tb_xdomain_release,
	.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);

/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to
 *	    the other domain is reached)
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain
 *
 * Allocates a new XDomain structure and returns a pointer to it. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid)
{
	struct tb_xdomain *xd;

	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
	if (!xd)
		return NULL;

	xd->tb = tb;
	xd->route = route;
	ida_init(&xd->service_ids);
	mutex_init(&xd->lock);
	INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
	INIT_DELAYED_WORK(&xd->properties_changed_work,
			  tb_xdomain_properties_changed);

	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->local_uuid)
		goto err_free;

	xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->remote_uuid)
		goto err_free_local_uuid;

	device_initialize(&xd->dev);
	xd->dev.parent = get_device(parent);
	xd->dev.bus = &tb_bus_type;
	xd->dev.type = &tb_xdomain_type;
	xd->dev.groups = xdomain_attr_groups;
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

	/*
	 * This keeps the DMA powered on as long as we have an active
	 * connection to another host.
	 */
	pm_runtime_set_active(&xd->dev);
	pm_runtime_get_noresume(&xd->dev);
	pm_runtime_enable(&xd->dev);

	return xd;

err_free_local_uuid:
	kfree(xd->local_uuid);
err_free:
	kfree(xd);

	return NULL;
}

/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts the XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object regardless of whether the handshake succeeded or
 * not.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
	/* Start exchanging properties with the other host */
	start_handshake(xd);
}

static int unregister_service(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}

/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is released the object will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
	stop_handshake(xd);

	device_for_each_child_reverse(&xd->dev, xd, unregister_service);

	/*
	 * Undo runtime PM here explicitly because it is possible that
	 * the XDomain was never added to the bus and thus device_del()
	 * is not called for it (device_del() would handle this otherwise).
	 */
	pm_runtime_disable(&xd->dev);
	pm_runtime_put_noidle(&xd->dev);
	pm_runtime_set_suspended(&xd->dev);

	if (!device_is_registered(&xd->dev))
		put_device(&xd->dev);
	else
		device_unregister(&xd->dev);
}

/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID of the transmit path the other end is using to
 *		   send packets
 * @transmit_ring: DMA ring used to receive packets from the other end
 * @receive_path: HopID of the receive path the other end is using to
 *		  receive packets
 * @receive_ring: DMA ring used to send packets to the other end
 *
 * The function enables DMA paths accordingly so that after successful
 * return the caller can send and receive packets using the high-speed
 * DMA path.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring)
{
	int ret;

	mutex_lock(&xd->lock);

	if (xd->transmit_path) {
		ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
		goto exit_unlock;
	}

	xd->transmit_path = transmit_path;
	xd->transmit_ring = transmit_ring;
	xd->receive_path = receive_path;
	xd->receive_ring = receive_ring;

	ret = tb_domain_approve_xdomain_paths(xd->tb, xd);

exit_unlock:
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
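/*
 * Illustrative sketch (hypothetical driver code): a service driver
 * that has already set up its NHI rings could enable the DMA paths
 * like this; the hop ID variables and error label are assumptions.
 *
 *	ret = tb_xdomain_enable_paths(xd, transmit_path, tx_ring->hop,
 *				      receive_path, rx_ring->hop);
 *	if (ret)
 *		goto err_free_rings;
 *	...
 *	tb_xdomain_disable_paths(xd);
 */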

/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 *
 * This does the opposite of tb_xdomain_enable_paths(). After a call to
 * this the caller is not expected to use the rings anymore.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd)
{
	int ret = 0;

	mutex_lock(&xd->lock);
	if (xd->transmit_path) {
		xd->transmit_path = 0;
		xd->transmit_ring = 0;
		xd->receive_path = 0;
		xd->receive_ring = 0;

		ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
	}
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);

struct tb_xdomain_lookup {
	const uuid_t *uuid;
	u8 link;
	u8 depth;
	u64 route;
};

static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
	const struct tb_xdomain_lookup *lookup)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		struct tb_xdomain *xd;

		if (port->xdomain) {
			xd = port->xdomain;

			if (lookup->uuid) {
				if (uuid_equal(xd->remote_uuid, lookup->uuid))
					return xd;
			} else if (lookup->link &&
				   lookup->link == xd->link &&
				   lookup->depth == xd->depth) {
				return xd;
			} else if (lookup->route &&
				   lookup->route == xd->route) {
				return xd;
			}
		} else if (tb_port_has_remote(port)) {
			xd = switch_find_xdomain(port->remote->sw, lookup);
			if (xd)
				return xd;
		}
	}

	return NULL;
}

/**
 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain where the XDomain belongs to
 * @uuid: UUID to look for
 *
 * Finds an XDomain by walking through the Thunderbolt topology below
 * @tb. The returned XDomain will have its reference count increased so
 * the caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.uuid = uuid;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
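/*
 * Illustrative sketch (hypothetical caller): looking up a peer while
 * holding the domain lock, as required above. peer_uuid is an
 * assumption.
 *
 *	mutex_lock(&tb->lock);
 *	xd = tb_xdomain_find_by_uuid(tb, peer_uuid);
 *	mutex_unlock(&tb->lock);
 *	if (xd) {
 *		...
 *		tb_xdomain_put(xd);
 *	}
 */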

/**
 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain where the XDomain belongs to
 * @link: Root switch link number
 * @depth: Depth in the link
 *
 * Finds an XDomain by walking through the Thunderbolt topology below
 * @tb. The returned XDomain will have its reference count increased so
 * the caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.link = link;
	lookup.depth = depth;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}

/**
 * tb_xdomain_find_by_route() - Find an XDomain by route string
 * @tb: Domain where the XDomain belongs to
 * @route: XDomain route string
 *
 * Finds an XDomain by walking through the Thunderbolt topology below
 * @tb. The returned XDomain will have its reference count increased so
 * the caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.route = route;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);

bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	const struct tb_protocol_handler *handler, *tmp;
	const struct tb_xdp_header *hdr = buf;
	unsigned int length;
	int ret = 0;

	/* We expect the packet to be at least the size of the header */
	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;
	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;

	/*
	 * Handle XDomain discovery protocol packets directly here. For
	 * other protocols (based on their UUID) we call the registered
	 * handlers in turn.
	 */
	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
		if (type == TB_CFG_PKG_XDOMAIN_REQ)
			return tb_xdp_schedule_request(tb, hdr, size);
		return false;
	}

	mutex_lock(&xdomain_lock);
	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
		if (!uuid_equal(&hdr->uuid, handler->uuid))
			continue;

		mutex_unlock(&xdomain_lock);
		ret = handler->callback(buf, size, handler->data);
		mutex_lock(&xdomain_lock);

		if (ret)
			break;
	}
	mutex_unlock(&xdomain_lock);

	return ret > 0;
}

static int rebuild_property_block(void)
{
	u32 *block, len;
	int ret;

	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
	if (ret < 0)
		return ret;

	len = ret;

	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
	if (!block)
		return -ENOMEM;

	ret = tb_property_format_dir(xdomain_property_dir, block, len);
	if (ret) {
		kfree(block);
		return ret;
	}

	kfree(xdomain_property_block);
	xdomain_property_block = block;
	xdomain_property_block_len = len;
	xdomain_property_block_gen++;

	return 0;
}

static int update_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;

	xd = tb_to_xdomain(dev);
	if (xd) {
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(50));
	}

	return 0;
}

static void update_all_xdomains(void)
{
	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}

static bool remove_directory(const char *key, const struct tb_property_dir *dir)
{
	struct tb_property *p;

	p = tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY);
	if (p && p->value.dir == dir) {
		tb_property_remove(p);
		return true;
	}
	return false;
}

/**
 * tb_register_property_dir() - Register property directory to the host
 * @key: Key (name) of the directory to add
 * @dir: Directory to add
 *
 * Service drivers can use this function to add a new property directory
 * to the host's available properties. The other connected hosts are
 * notified so they can re-read the properties of this host if they are
 * interested.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret;

	if (WARN_ON(!xdomain_property_dir))
		return -EAGAIN;

	if (!key || strlen(key) > 8)
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	if (tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY)) {
		ret = -EEXIST;
		goto err_unlock;
	}

	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
	if (ret)
		goto err_unlock;

	ret = rebuild_property_block();
	if (ret) {
		remove_directory(key, dir);
		goto err_unlock;
	}

	mutex_unlock(&xdomain_lock);
	update_all_xdomains();
	return 0;

err_unlock:
	mutex_unlock(&xdomain_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);
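/*
 * Illustrative sketch (hypothetical driver code): exposing a service
 * directory to remote hosts. The "network" key, my_dir_uuid and the
 * property names are assumptions, not mandated by this file.
 *
 *	dir = tb_property_create_dir(&my_dir_uuid);
 *	tb_property_add_immediate(dir, "prtcid", 1);
 *	tb_property_add_immediate(dir, "prtcvers", 1);
 *	ret = tb_register_property_dir("network", dir);
 *	...
 *	tb_unregister_property_dir("network", dir);
 *	tb_property_free_dir(dir);
 */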

/**
 * tb_unregister_property_dir() - Remove property directory from the host
 * @key: Key (name) of the directory
 * @dir: Directory to remove
 *
 * This will remove the existing directory from this host and notify the
 * connected hosts about the change.
 */
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret = 0;

	mutex_lock(&xdomain_lock);
	if (remove_directory(key, dir))
		ret = rebuild_property_block();
	mutex_unlock(&xdomain_lock);

	if (!ret)
		update_all_xdomains();
}
EXPORT_SYMBOL_GPL(tb_unregister_property_dir);

int tb_xdomain_init(void)
{
	int ret;

	xdomain_property_dir = tb_property_create_dir(NULL);
	if (!xdomain_property_dir)
		return -ENOMEM;

	/*
	 * Initialize the standard set of properties without any service
	 * directories. Those will be added by service drivers themselves
	 * when they are loaded.
	 */
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
				  PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_text(xdomain_property_dir, "deviceid",
			     utsname()->nodename);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

	ret = rebuild_property_block();
	if (ret) {
		tb_property_free_dir(xdomain_property_dir);
		xdomain_property_dir = NULL;
	}

	return ret;
}

void tb_xdomain_exit(void)
{
	kfree(xdomain_property_block);
	tb_property_free_dir(xdomain_property_dir);
}