drivers/thunderbolt/xdomain.c
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <[email protected]>
 *          Mika Westerberg <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_DEFAULT_TIMEOUT			5000 /* ms */
#define XDOMAIN_PROPERTIES_RETRIES		60
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES	10

struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};

/* Serializes access to the properties and protocol handlers below */
static DEFINE_MUTEX(xdomain_lock);

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 *xdomain_property_block;
static u32 xdomain_property_block_len;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;
		u8 req_seq, res_seq;

		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure route matches */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Then check that the sequence number matches */
		res_seq = res_hdr->xd_hdr.length_sn & TB_XDOMAIN_SN_MASK;
		res_seq >>= TB_XDOMAIN_SN_SHIFT;
		req_seq = req_hdr->xd_hdr.length_sn & TB_XDOMAIN_SN_MASK;
		req_seq >>= TB_XDOMAIN_SN_SHIFT;
		if (res_seq != req_seq)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}

static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}
/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message to
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);

static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request to
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type,
	void *response, size_t response_size,
	enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
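
/*
 * Example (editor's illustrative sketch, not part of the original
 * file): a service driver could use tb_xdomain_request() to implement
 * a request/response exchange over the control channel. The packet
 * structures and the 'xd' pointer below are hypothetical; each service
 * defines its own protocol on top of the XDomain packets:
 *
 *	struct my_req req = { .opcode = MY_OPCODE };
 *	struct my_res res = {};
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ,
 *				 &res, sizeof(res),
 *				 TB_CFG_PKG_XDOMAIN_RESP, 5000);
 *	if (ret)
 *		return ret;
 */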

static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
	u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}

static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
{
	const struct tb_xdp_error_response *error;

	if (hdr->type != ERROR_RESPONSE)
		return 0;

	error = (const struct tb_xdp_error_response *)hdr;

	switch (error->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		return -ENOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	len = 0;
	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->hdr);
		if (ret)
			goto err;

		/*
		 * The package length includes the whole payload without
		 * the XDomain header. Validate first that the package is
		 * at least the size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * First time allocate block that has enough space for
		 * the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	u64 route, u8 sequence, const uuid_t *src_uuid,
	const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol supports forwarding, though, and we might add
	 * support for it later on.
	 */
	if (!uuid_equal(src_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xdomain_lock);

	if (req->offset >= xdomain_property_block_len) {
		mutex_unlock(&xdomain_lock);
		return -EINVAL;
	}

	len = xdomain_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xdomain_lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xdomain_property_block_gen;
	res->data_length = xdomain_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, src_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xdomain_property_block[req->offset], len * 4);

	mutex_unlock(&xdomain_lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.hdr);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called the service driver needs to
 * be able to handle calls to the callback whenever a package with the
 * registered protocol is received.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
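
/*
 * Example (editor's sketch; the UUID, names and protocol are made up):
 * a driver that wants to see raw packets for its own protocol could
 * register a handler like this. Returning a positive value from the
 * callback tells tb_xdomain_handle_request() that the packet was
 * consumed:
 *
 *	static int my_callback(const void *buf, size_t size, void *data)
 *	{
 *		// parse the packet here
 *		return 1;
 *	}
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_protocol_uuid,
 *		.callback = my_callback,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_handler);
 */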

/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);

static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	const uuid_t *uuid;
	int ret = 0;
	u8 sequence;
	u64 route;

	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
			(const struct tb_xdp_properties *)pkg);
		break;

	case PROPERTIES_CHANGED_REQUEST: {
		const struct tb_xdp_properties_changed *xchg =
			(const struct tb_xdp_properties_changed *)pkg;
		struct tb_xdomain *xd;

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
		if (xd) {
			queue_delayed_work(tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(50));
			tb_xdomain_put(xd);
		}

		break;
	}

	default:
		break;
	}

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);
}

static void
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	if (!xw->pkg) {
		kfree(xw);
		return;
	}
	xw->tb = tb;

	queue_work(tb->wq, &xw->work);
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers the given service driver to the Thunderbolt bus.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);

/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);
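
/*
 * Example (editor's illustrative skeleton; driver name, callbacks and
 * the service key are hypothetical): an XDomain service driver binds
 * to services through the standard driver model using a tb_service_id
 * table:
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, my_ids);
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver.name = "my-service",
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ids,
 *	};
 *
 * The driver is then registered with tb_register_service_driver() from
 * module init and removed with tb_unregister_service_driver() on exit.
 */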

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null terminated but anything else is pretty much
	 * allowed.
	 */
	return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}
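
/*
 * For example, a service with key "network", prtcid 1, prtcvers 1 and
 * prtcrevs 1 produces the modalias string
 * "tbsvc:knetworkp00000001v00000001r00000001", which udev uses to
 * match service drivers via their tb_service_id tables.
 */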

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except new line and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};

static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	ida_simple_remove(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}

struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);

static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}

static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
		if (svc->id < 0) {
			kfree(svc->key);
			kfree(svc);
			break;
		}
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}

static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}

/* Called with @xd->lock held */
static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
{
	if (!xd->resume)
		return;

	xd->resume = false;
	if (xd->transmit_path) {
		dev_dbg(&xd->dev, "re-establishing DMA path\n");
		tb_domain_approve_xdomain_paths(xd->tb, xd);
	}
}

static void tb_xdomain_get_properties(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_properties_work.work);
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->properties_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->properties_retries-- > 0) {
			queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(1000));
		} else {
			/* Give up now */
			dev_err(&xd->dev,
				"failed to read XDomain properties from %pUb\n",
				xd->remote_uuid);
		}
		return;
	}

	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->properties && gen <= xd->property_block_gen) {
		/*
		 * On resume it is likely that the properties block is
		 * not changed (unless the other end added or removed
		 * services). However, we need to make sure the existing
		 * DMA paths are restored properly.
		 */
		tb_xdomain_restore_paths(xd);
		goto err_free_block;
	}

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->properties) {
		tb_property_free_dir(xd->properties);
		update = true;
	}

	xd->properties = dir;
	xd->property_block_gen = gen;

	tb_xdomain_restore_paths(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify userspace that it has changed.
	 */
	if (!update) {
		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return;
		}
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);
	return;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);
}

static void tb_xdomain_properties_changed(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     properties_changed_work.work);
	int ret;

	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
				xd->properties_changed_retries, xd->local_uuid);
	if (ret) {
		if (xd->properties_changed_retries-- > 0)
			queue_delayed_work(xd->tb->wq,
					   &xd->properties_changed_work,
					   msecs_to_jiffies(1000));
		return;
	}

	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(device_name);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *xdomain_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	NULL,
};

static struct attribute_group xdomain_attr_group = {
	.attrs = xdomain_attrs,
};

static const struct attribute_group *xdomain_attr_groups[] = {
	&xdomain_attr_group,
	NULL,
};

static void tb_xdomain_release(struct device *dev)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	put_device(xd->dev.parent);

	tb_property_free_dir(xd->properties);
	ida_destroy(&xd->service_ids);

	kfree(xd->local_uuid);
	kfree(xd->remote_uuid);
	kfree(xd->device_name);
	kfree(xd->vendor_name);
	kfree(xd);
}

static void start_handshake(struct tb_xdomain *xd)
{
	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;

	/* Start exchanging properties with the other host */
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(100));
	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
			   msecs_to_jiffies(1000));
}

static void stop_handshake(struct tb_xdomain *xd)
{
	xd->properties_retries = 0;
	xd->properties_changed_retries = 0;

	cancel_delayed_work_sync(&xd->get_properties_work);
	cancel_delayed_work_sync(&xd->properties_changed_work);
}

static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
	stop_handshake(tb_to_xdomain(dev));
	return 0;
}

static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
	struct tb_xdomain *xd = tb_to_xdomain(dev);

	/*
	 * Ask tb_xdomain_get_properties() to restore any existing DMA
	 * paths after the properties are re-read.
	 */
	xd->resume = true;
	start_handshake(xd);

	return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

struct device_type tb_xdomain_type = {
	.name = "thunderbolt_xdomain",
	.release = tb_xdomain_release,
	.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);

/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to
 *          the other domain is reached).
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain
 *
 * Allocates new XDomain structure and returns pointer to that. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid)
{
	struct tb_xdomain *xd;

	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
	if (!xd)
		return NULL;

	xd->tb = tb;
	xd->route = route;
	ida_init(&xd->service_ids);
	mutex_init(&xd->lock);
	INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
	INIT_DELAYED_WORK(&xd->properties_changed_work,
			  tb_xdomain_properties_changed);

	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->local_uuid)
		goto err_free;

	xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->remote_uuid)
		goto err_free_local_uuid;

	device_initialize(&xd->dev);
	xd->dev.parent = get_device(parent);
	xd->dev.bus = &tb_bus_type;
	xd->dev.type = &tb_xdomain_type;
	xd->dev.groups = xdomain_attr_groups;
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

	return xd;

err_free_local_uuid:
	kfree(xd->local_uuid);
err_free:
	kfree(xd);

	return NULL;
}

/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts the XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object, regardless of whether the handshake succeeded or not.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
	/* Start exchanging properties with the other host */
	start_handshake(xd);
}

static int unregister_service(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}

/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is released the object will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
	stop_handshake(xd);

	device_for_each_child_reverse(&xd->dev, xd, unregister_service);

	if (!device_is_registered(&xd->dev))
		put_device(&xd->dev);
	else
		device_unregister(&xd->dev);
}

/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID of the transmit path the other end is using to
 *                 send packets
 * @transmit_ring: DMA ring used to receive packets from the other end
 * @receive_path: HopID of the receive path the other end is using to
 *                receive packets
 * @receive_ring: DMA ring used to send packets to the other end
 *
 * The function enables the DMA paths accordingly so that after a
 * successful return the caller can send and receive packets using the
 * high-speed DMA path.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring)
{
	int ret;

	mutex_lock(&xd->lock);

	if (xd->transmit_path) {
		ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
		goto exit_unlock;
	}

	xd->transmit_path = transmit_path;
	xd->transmit_ring = transmit_ring;
	xd->receive_path = receive_path;
	xd->receive_ring = receive_ring;

	ret = tb_domain_approve_xdomain_paths(xd->tb, xd);

exit_unlock:
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
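
/*
 * Example (editor's sketch; the HopIDs are negotiated by the service
 * protocol itself and the names are made up): once both ends have
 * agreed on the paths, either side enables DMA with its local rings:
 *
 *	ret = tb_xdomain_enable_paths(xd, MY_TRANSMIT_PATH,
 *				      tx_ring->hop, MY_RECEIVE_PATH,
 *				      rx_ring->hop);
 *	if (ret)
 *		goto err_free_rings;
 */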

/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 *
 * This does the opposite of tb_xdomain_enable_paths(). After a call to
 * this function the caller is not expected to use the rings anymore.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd)
{
	int ret = 0;

	mutex_lock(&xd->lock);
	if (xd->transmit_path) {
		xd->transmit_path = 0;
		xd->transmit_ring = 0;
		xd->receive_path = 0;
		xd->receive_ring = 0;

		ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
	}
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);

struct tb_xdomain_lookup {
	const uuid_t *uuid;
	u8 link;
	u8 depth;
};

static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
	const struct tb_xdomain_lookup *lookup)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		struct tb_xdomain *xd;

		if (tb_is_upstream_port(port))
			continue;

		if (port->xdomain) {
			xd = port->xdomain;

			if (lookup->uuid) {
				if (uuid_equal(xd->remote_uuid, lookup->uuid))
					return xd;
			} else if (lookup->link == xd->link &&
				   lookup->depth == xd->depth) {
				return xd;
			}
		} else if (port->remote) {
			xd = switch_find_xdomain(port->remote->sw, lookup);
			if (xd)
				return xd;
		}
	}

	return NULL;
}

/**
 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain where the XDomain belongs to
 * @uuid: UUID to look for
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.uuid = uuid;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	if (xd) {
		get_device(&xd->dev);
		return xd;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
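
/*
 * Example (editor's sketch): the lookup must happen under @tb->lock
 * and the caller owns the returned reference:
 *
 *	mutex_lock(&tb->lock);
 *	xd = tb_xdomain_find_by_uuid(tb, uuid);
 *	mutex_unlock(&tb->lock);
 *	if (xd) {
 *		// use xd here
 *		tb_xdomain_put(xd);
 *	}
 */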

/**
 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain where the XDomain belongs to
 * @link: Root switch link number
 * @depth: Depth in the link
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.link = link;
	lookup.depth = depth;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	if (xd) {
		get_device(&xd->dev);
		return xd;
	}

	return NULL;
}

bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	const struct tb_protocol_handler *handler, *tmp;
	const struct tb_xdp_header *hdr = buf;
	unsigned int length;
	int ret = 0;

	/* We expect the packet to be at least the size of the header */
	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;
	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;

	/*
	 * Handle XDomain discovery protocol packets directly here. For
	 * other protocols (based on their UUID) we call registered
	 * handlers in turn.
	 */
	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
		if (type == TB_CFG_PKG_XDOMAIN_REQ) {
			tb_xdp_schedule_request(tb, hdr, size);
			return true;
		}
		return false;
	}

	mutex_lock(&xdomain_lock);
	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
		if (!uuid_equal(&hdr->uuid, handler->uuid))
			continue;

		mutex_unlock(&xdomain_lock);
		ret = handler->callback(buf, size, handler->data);
		mutex_lock(&xdomain_lock);

		if (ret)
			break;
	}
	mutex_unlock(&xdomain_lock);

	return ret > 0;
}

static int rebuild_property_block(void)
{
	u32 *block, len;
	int ret;

	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
	if (ret < 0)
		return ret;

	len = ret;

	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
	if (!block)
		return -ENOMEM;

	ret = tb_property_format_dir(xdomain_property_dir, block, len);
	if (ret) {
		kfree(block);
		return ret;
	}

	kfree(xdomain_property_block);
	xdomain_property_block = block;
	xdomain_property_block_len = len;
	xdomain_property_block_gen++;

	return 0;
}

static int update_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;

	xd = tb_to_xdomain(dev);
	if (xd) {
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(50));
	}

	return 0;
}

static void update_all_xdomains(void)
{
	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}

static bool remove_directory(const char *key, const struct tb_property_dir *dir)
{
	struct tb_property *p;

	p = tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY);
	if (p && p->value.dir == dir) {
		tb_property_remove(p);
		return true;
	}
	return false;
}

/**
 * tb_register_property_dir() - Register property directory to the host
 * @key: Key (name) of the directory to add
 * @dir: Directory to add
 *
 * Service drivers can use this function to add a new property directory
 * to the host's available properties. The other connected hosts are
 * notified so they can re-read the properties of this host if they are
 * interested.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret;

	if (WARN_ON(!xdomain_property_dir))
		return -EAGAIN;

	if (!key || strlen(key) > 8)
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	if (tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY)) {
		ret = -EEXIST;
		goto err_unlock;
	}

	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
	if (ret)
		goto err_unlock;

	ret = rebuild_property_block();
	if (ret) {
		remove_directory(key, dir);
		goto err_unlock;
	}

	mutex_unlock(&xdomain_lock);
	update_all_xdomains();
	return 0;

err_unlock:
	mutex_unlock(&xdomain_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);
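
/*
 * Example (editor's sketch; the directory layout follows the pattern
 * used by existing service drivers, but the key and values here are
 * made up): build a directory with the standard protocol properties
 * and register it under the service key:
 *
 *	struct tb_property_dir *dir;
 *	int ret;
 *
 *	dir = tb_property_create_dir(&my_service_uuid);
 *	if (!dir)
 *		return -ENOMEM;
 *	tb_property_add_immediate(dir, "prtcid", 1);
 *	tb_property_add_immediate(dir, "prtcvers", 1);
 *	tb_property_add_immediate(dir, "prtcrevs", 1);
 *	tb_property_add_immediate(dir, "prtcstns", 0);
 *
 *	ret = tb_register_property_dir("network", dir);
 *	if (ret)
 *		tb_property_free_dir(dir);
 */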

/**
 * tb_unregister_property_dir() - Removes property directory from host
 * @key: Key (name) of the directory
 * @dir: Directory to remove
 *
 * This will remove the existing directory from this host and notify the
 * connected hosts about the change.
 */
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret = 0;

	mutex_lock(&xdomain_lock);
	if (remove_directory(key, dir))
		ret = rebuild_property_block();
	mutex_unlock(&xdomain_lock);

	if (!ret)
		update_all_xdomains();
}
EXPORT_SYMBOL_GPL(tb_unregister_property_dir);

int tb_xdomain_init(void)
{
	int ret;

	xdomain_property_dir = tb_property_create_dir(NULL);
	if (!xdomain_property_dir)
		return -ENOMEM;

	/*
	 * Initialize standard set of properties without any service
	 * directories. Those will be added by service drivers
	 * themselves when they are loaded.
	 */
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
				  PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_text(xdomain_property_dir, "deviceid",
			     utsname()->nodename);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

	ret = rebuild_property_block();
	if (ret) {
		tb_property_free_dir(xdomain_property_dir);
		xdomain_property_dir = NULL;
	}

	return ret;
}

void tb_xdomain_exit(void)
{
	kfree(xdomain_property_block);
	tb_property_free_dir(xdomain_property_dir);
}