]>
Commit | Line | Data |
---|---|---|
eb50fd3a | 1 | // SPDX-License-Identifier: GPL-2.0 |
30c6d9d7 AE |
2 | /* |
3 | * SVC Greybus driver. | |
4 | * | |
5 | * Copyright 2015 Google Inc. | |
6 | * Copyright 2015 Linaro Ltd. | |
30c6d9d7 AE |
7 | */ |
8 | ||
#include <linux/debugfs.h>
#include <linux/kstrtox.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>
/*
 * Timeouts for long-running SVC operations (presumably milliseconds,
 * matching gb_operation_sync_timeout() — confirm against operation.c).
 */
#define SVC_INTF_EJECT_TIMEOUT		9000
#define SVC_INTF_ACTIVATE_TIMEOUT	6000
#define SVC_INTF_RESUME_TIMEOUT		3000
/*
 * An incoming SVC request whose handling must be deferred out of the
 * receive path; it is queued as work and processed from a workqueue.
 */
struct gb_svc_deferred_request {
	struct work_struct work;	/* queued onto the SVC workqueue */
	struct gb_operation *operation;	/* the request being deferred */
};

static int gb_svc_queue_deferred_request(struct gb_operation *operation);
66069fb0 | 24 | static ssize_t endo_id_show(struct device *dev, |
8478c35a | 25 | struct device_attribute *attr, char *buf) |
66069fb0 JH |
26 | { |
27 | struct gb_svc *svc = to_gb_svc(dev); | |
28 | ||
29 | return sprintf(buf, "0x%04x\n", svc->endo_id); | |
30 | } | |
31 | static DEVICE_ATTR_RO(endo_id); | |
32 | ||
33 | static ssize_t ap_intf_id_show(struct device *dev, | |
8478c35a | 34 | struct device_attribute *attr, char *buf) |
66069fb0 JH |
35 | { |
36 | struct gb_svc *svc = to_gb_svc(dev); | |
37 | ||
38 | return sprintf(buf, "%u\n", svc->ap_intf_id); | |
39 | } | |
40 | static DEVICE_ATTR_RO(ap_intf_id); | |
41 | ||
2c92bd52 RMS |
42 | // FIXME |
43 | // This is a hack, we need to do this "right" and clean the interface up | |
44 | // properly, not just forcibly yank the thing out of the system and hope for the | |
45 | // best. But for now, people want their modules to come out without having to | |
46 | // throw the thing to the ground or get out a screwdriver. | |
47 | static ssize_t intf_eject_store(struct device *dev, | |
48 | struct device_attribute *attr, const char *buf, | |
49 | size_t len) | |
50 | { | |
51 | struct gb_svc *svc = to_gb_svc(dev); | |
52 | unsigned short intf_id; | |
53 | int ret; | |
54 | ||
55 | ret = kstrtou16(buf, 10, &intf_id); | |
56 | if (ret < 0) | |
57 | return ret; | |
58 | ||
59 | dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id); | |
60 | ||
61 | ret = gb_svc_intf_eject(svc, intf_id); | |
62 | if (ret < 0) | |
63 | return ret; | |
64 | ||
65 | return len; | |
66 | } | |
67 | static DEVICE_ATTR_WO(intf_eject); | |
68 | ||
d562853d GKH |
69 | static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr, |
70 | char *buf) | |
71 | { | |
72 | struct gb_svc *svc = to_gb_svc(dev); | |
73 | ||
74 | return sprintf(buf, "%s\n", | |
75 | gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled"); | |
76 | } | |
77 | ||
78 | static ssize_t watchdog_store(struct device *dev, | |
79 | struct device_attribute *attr, const char *buf, | |
80 | size_t len) | |
81 | { | |
82 | struct gb_svc *svc = to_gb_svc(dev); | |
83 | int retval; | |
84 | bool user_request; | |
85 | ||
86 | retval = strtobool(buf, &user_request); | |
87 | if (retval) | |
88 | return retval; | |
89 | ||
90 | if (user_request) | |
91 | retval = gb_svc_watchdog_enable(svc); | |
92 | else | |
93 | retval = gb_svc_watchdog_disable(svc); | |
94 | if (retval) | |
95 | return retval; | |
96 | return len; | |
97 | } | |
98 | static DEVICE_ATTR_RW(watchdog); | |
99 | ||
7c4a0edb DL |
100 | static ssize_t watchdog_action_show(struct device *dev, |
101 | struct device_attribute *attr, char *buf) | |
102 | { | |
103 | struct gb_svc *svc = to_gb_svc(dev); | |
104 | ||
105 | if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) | |
106 | return sprintf(buf, "panic\n"); | |
107 | else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) | |
108 | return sprintf(buf, "reset\n"); | |
109 | ||
110 | return -EINVAL; | |
111 | } | |
112 | ||
113 | static ssize_t watchdog_action_store(struct device *dev, | |
114 | struct device_attribute *attr, | |
115 | const char *buf, size_t len) | |
116 | { | |
117 | struct gb_svc *svc = to_gb_svc(dev); | |
118 | ||
119 | if (sysfs_streq(buf, "panic")) | |
120 | svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL; | |
121 | else if (sysfs_streq(buf, "reset")) | |
122 | svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO; | |
123 | else | |
124 | return -EINVAL; | |
125 | ||
126 | return len; | |
127 | } | |
128 | static DEVICE_ATTR_RW(watchdog_action); | |
129 | ||
9504677c DL |
130 | static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value) |
131 | { | |
132 | struct gb_svc_pwrmon_rail_count_get_response response; | |
133 | int ret; | |
134 | ||
135 | ret = gb_operation_sync(svc->connection, | |
136 | GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0, | |
137 | &response, sizeof(response)); | |
138 | if (ret) { | |
89f2df43 | 139 | dev_err(&svc->dev, "failed to get rail count: %d\n", ret); |
9504677c DL |
140 | return ret; |
141 | } | |
142 | ||
143 | *value = response.rail_count; | |
144 | ||
145 | return 0; | |
146 | } | |
147 | ||
148 | static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc, | |
f35fdb2f JH |
149 | struct gb_svc_pwrmon_rail_names_get_response *response, |
150 | size_t bufsize) | |
9504677c DL |
151 | { |
152 | int ret; | |
153 | ||
154 | ret = gb_operation_sync(svc->connection, | |
155 | GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0, | |
156 | response, bufsize); | |
157 | if (ret) { | |
89f2df43 | 158 | dev_err(&svc->dev, "failed to get rail names: %d\n", ret); |
9504677c DL |
159 | return ret; |
160 | } | |
161 | ||
8fb76c3c DL |
162 | if (response->status != GB_SVC_OP_SUCCESS) { |
163 | dev_err(&svc->dev, | |
164 | "SVC error while getting rail names: %u\n", | |
165 | response->status); | |
166 | return -EREMOTEIO; | |
167 | } | |
168 | ||
9504677c DL |
169 | return 0; |
170 | } | |
171 | ||
172 | static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id, | |
173 | u8 measurement_type, u32 *value) | |
174 | { | |
175 | struct gb_svc_pwrmon_sample_get_request request; | |
176 | struct gb_svc_pwrmon_sample_get_response response; | |
177 | int ret; | |
178 | ||
179 | request.rail_id = rail_id; | |
180 | request.measurement_type = measurement_type; | |
181 | ||
182 | ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET, | |
183 | &request, sizeof(request), | |
184 | &response, sizeof(response)); | |
185 | if (ret) { | |
89f2df43 | 186 | dev_err(&svc->dev, "failed to get rail sample: %d\n", ret); |
9504677c DL |
187 | return ret; |
188 | } | |
189 | ||
190 | if (response.result) { | |
191 | dev_err(&svc->dev, | |
192 | "UniPro error while getting rail power sample (%d %d): %d\n", | |
193 | rail_id, measurement_type, response.result); | |
194 | switch (response.result) { | |
195 | case GB_SVC_PWRMON_GET_SAMPLE_INVAL: | |
196 | return -EINVAL; | |
197 | case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP: | |
5b35ef95 | 198 | return -ENOMSG; |
9504677c | 199 | default: |
4aea5a15 | 200 | return -EREMOTEIO; |
9504677c DL |
201 | } |
202 | } | |
203 | ||
204 | *value = le32_to_cpu(response.measurement); | |
205 | ||
206 | return 0; | |
207 | } | |
208 | ||
ddb10c8a DL |
209 | int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id, |
210 | u8 measurement_type, u32 *value) | |
211 | { | |
212 | struct gb_svc_pwrmon_intf_sample_get_request request; | |
213 | struct gb_svc_pwrmon_intf_sample_get_response response; | |
214 | int ret; | |
215 | ||
216 | request.intf_id = intf_id; | |
217 | request.measurement_type = measurement_type; | |
218 | ||
219 | ret = gb_operation_sync(svc->connection, | |
220 | GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET, | |
221 | &request, sizeof(request), | |
222 | &response, sizeof(response)); | |
223 | if (ret) { | |
89f2df43 | 224 | dev_err(&svc->dev, "failed to get intf sample: %d\n", ret); |
ddb10c8a DL |
225 | return ret; |
226 | } | |
227 | ||
228 | if (response.result) { | |
229 | dev_err(&svc->dev, | |
230 | "UniPro error while getting intf power sample (%d %d): %d\n", | |
231 | intf_id, measurement_type, response.result); | |
232 | switch (response.result) { | |
233 | case GB_SVC_PWRMON_GET_SAMPLE_INVAL: | |
234 | return -EINVAL; | |
235 | case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP: | |
0bba4fb5 | 236 | return -ENOMSG; |
ddb10c8a | 237 | default: |
4aea5a15 | 238 | return -EREMOTEIO; |
ddb10c8a DL |
239 | } |
240 | } | |
241 | ||
242 | *value = le32_to_cpu(response.measurement); | |
243 | ||
244 | return 0; | |
245 | } | |
246 | ||
/* sysfs attributes exposed on the SVC device. */
static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	&dev_attr_watchdog_action.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
4d5f6218 | 257 | int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id) |
30c6d9d7 AE |
258 | { |
259 | struct gb_svc_intf_device_id_request request; | |
260 | ||
261 | request.intf_id = intf_id; | |
262 | request.device_id = device_id; | |
263 | ||
264 | return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID, | |
265 | &request, sizeof(request), NULL, 0); | |
266 | } | |
267 | ||
c5d55fb3 RMS |
268 | int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id) |
269 | { | |
270 | struct gb_svc_intf_eject_request request; | |
e676ccd7 | 271 | int ret; |
c5d55fb3 RMS |
272 | |
273 | request.intf_id = intf_id; | |
274 | ||
275 | /* | |
276 | * The pulse width for module release in svc is long so we need to | |
277 | * increase the timeout so the operation will not return to soon. | |
278 | */ | |
e676ccd7 JH |
279 | ret = gb_operation_sync_timeout(svc->connection, |
280 | GB_SVC_TYPE_INTF_EJECT, &request, | |
281 | sizeof(request), NULL, 0, | |
d18da86b | 282 | SVC_INTF_EJECT_TIMEOUT); |
e676ccd7 JH |
283 | if (ret) { |
284 | dev_err(&svc->dev, "failed to eject interface %u\n", intf_id); | |
285 | return ret; | |
286 | } | |
287 | ||
288 | return 0; | |
c5d55fb3 | 289 | } |
c5d55fb3 | 290 | |
017482b2 JH |
291 | int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable) |
292 | { | |
144763bf JC |
293 | struct gb_svc_intf_vsys_request request; |
294 | struct gb_svc_intf_vsys_response response; | |
295 | int type, ret; | |
296 | ||
297 | request.intf_id = intf_id; | |
017482b2 | 298 | |
144763bf JC |
299 | if (enable) |
300 | type = GB_SVC_TYPE_INTF_VSYS_ENABLE; | |
301 | else | |
302 | type = GB_SVC_TYPE_INTF_VSYS_DISABLE; | |
303 | ||
304 | ret = gb_operation_sync(svc->connection, type, | |
8478c35a CS |
305 | &request, sizeof(request), |
306 | &response, sizeof(response)); | |
144763bf JC |
307 | if (ret < 0) |
308 | return ret; | |
309 | if (response.result_code != GB_SVC_INTF_VSYS_OK) | |
310 | return -EREMOTEIO; | |
017482b2 JH |
311 | return 0; |
312 | } | |
313 | ||
314 | int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable) | |
315 | { | |
144763bf JC |
316 | struct gb_svc_intf_refclk_request request; |
317 | struct gb_svc_intf_refclk_response response; | |
318 | int type, ret; | |
017482b2 | 319 | |
144763bf JC |
320 | request.intf_id = intf_id; |
321 | ||
322 | if (enable) | |
323 | type = GB_SVC_TYPE_INTF_REFCLK_ENABLE; | |
324 | else | |
325 | type = GB_SVC_TYPE_INTF_REFCLK_DISABLE; | |
326 | ||
327 | ret = gb_operation_sync(svc->connection, type, | |
8478c35a CS |
328 | &request, sizeof(request), |
329 | &response, sizeof(response)); | |
144763bf JC |
330 | if (ret < 0) |
331 | return ret; | |
332 | if (response.result_code != GB_SVC_INTF_REFCLK_OK) | |
333 | return -EREMOTEIO; | |
017482b2 JH |
334 | return 0; |
335 | } | |
336 | ||
337 | int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable) | |
338 | { | |
144763bf JC |
339 | struct gb_svc_intf_unipro_request request; |
340 | struct gb_svc_intf_unipro_response response; | |
341 | int type, ret; | |
342 | ||
343 | request.intf_id = intf_id; | |
344 | ||
345 | if (enable) | |
346 | type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE; | |
347 | else | |
348 | type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE; | |
017482b2 | 349 | |
144763bf | 350 | ret = gb_operation_sync(svc->connection, type, |
8478c35a CS |
351 | &request, sizeof(request), |
352 | &response, sizeof(response)); | |
144763bf JC |
353 | if (ret < 0) |
354 | return ret; | |
355 | if (response.result_code != GB_SVC_INTF_UNIPRO_OK) | |
356 | return -EREMOTEIO; | |
017482b2 JH |
357 | return 0; |
358 | } | |
359 | ||
1e8e22b5 JH |
360 | int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type) |
361 | { | |
140026b3 JC |
362 | struct gb_svc_intf_activate_request request; |
363 | struct gb_svc_intf_activate_response response; | |
364 | int ret; | |
365 | ||
366 | request.intf_id = intf_id; | |
367 | ||
368 | ret = gb_operation_sync_timeout(svc->connection, | |
8478c35a CS |
369 | GB_SVC_TYPE_INTF_ACTIVATE, |
370 | &request, sizeof(request), | |
371 | &response, sizeof(response), | |
372 | SVC_INTF_ACTIVATE_TIMEOUT); | |
140026b3 JC |
373 | if (ret < 0) |
374 | return ret; | |
03fba180 JC |
375 | if (response.status != GB_SVC_OP_SUCCESS) { |
376 | dev_err(&svc->dev, "failed to activate interface %u: %u\n", | |
8478c35a | 377 | intf_id, response.status); |
03fba180 JC |
378 | return -EREMOTEIO; |
379 | } | |
1e8e22b5 | 380 | |
140026b3 | 381 | *intf_type = response.intf_type; |
1e8e22b5 JH |
382 | |
383 | return 0; | |
384 | } | |
385 | ||
fc8a4027 DL |
386 | int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id) |
387 | { | |
388 | struct gb_svc_intf_resume_request request; | |
389 | struct gb_svc_intf_resume_response response; | |
390 | int ret; | |
391 | ||
392 | request.intf_id = intf_id; | |
393 | ||
394 | ret = gb_operation_sync_timeout(svc->connection, | |
395 | GB_SVC_TYPE_INTF_RESUME, | |
396 | &request, sizeof(request), | |
397 | &response, sizeof(response), | |
398 | SVC_INTF_RESUME_TIMEOUT); | |
399 | if (ret < 0) { | |
400 | dev_err(&svc->dev, "failed to send interface resume %u: %d\n", | |
401 | intf_id, ret); | |
402 | return ret; | |
403 | } | |
404 | ||
405 | if (response.status != GB_SVC_OP_SUCCESS) { | |
406 | dev_err(&svc->dev, "failed to resume interface %u: %u\n", | |
407 | intf_id, response.status); | |
408 | return -EREMOTEIO; | |
409 | } | |
410 | ||
411 | return 0; | |
412 | } | |
413 | ||
19151c3d VK |
414 | int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, |
415 | u32 *value) | |
416 | { | |
417 | struct gb_svc_dme_peer_get_request request; | |
418 | struct gb_svc_dme_peer_get_response response; | |
419 | u16 result; | |
420 | int ret; | |
421 | ||
422 | request.intf_id = intf_id; | |
423 | request.attr = cpu_to_le16(attr); | |
424 | request.selector = cpu_to_le16(selector); | |
425 | ||
426 | ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET, | |
427 | &request, sizeof(request), | |
428 | &response, sizeof(response)); | |
429 | if (ret) { | |
b933fa4a | 430 | dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n", |
8478c35a | 431 | intf_id, attr, selector, ret); |
19151c3d VK |
432 | return ret; |
433 | } | |
434 | ||
435 | result = le16_to_cpu(response.result_code); | |
436 | if (result) { | |
b933fa4a | 437 | dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n", |
8478c35a | 438 | intf_id, attr, selector, result); |
4aea5a15 | 439 | return -EREMOTEIO; |
19151c3d VK |
440 | } |
441 | ||
442 | if (value) | |
443 | *value = le32_to_cpu(response.attr_value); | |
444 | ||
445 | return 0; | |
446 | } | |
19151c3d VK |
447 | |
448 | int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, | |
449 | u32 value) | |
450 | { | |
451 | struct gb_svc_dme_peer_set_request request; | |
452 | struct gb_svc_dme_peer_set_response response; | |
453 | u16 result; | |
454 | int ret; | |
455 | ||
456 | request.intf_id = intf_id; | |
457 | request.attr = cpu_to_le16(attr); | |
458 | request.selector = cpu_to_le16(selector); | |
459 | request.value = cpu_to_le32(value); | |
460 | ||
461 | ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET, | |
462 | &request, sizeof(request), | |
463 | &response, sizeof(response)); | |
464 | if (ret) { | |
b933fa4a | 465 | dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n", |
8478c35a | 466 | intf_id, attr, selector, value, ret); |
19151c3d VK |
467 | return ret; |
468 | } | |
469 | ||
470 | result = le16_to_cpu(response.result_code); | |
471 | if (result) { | |
b933fa4a | 472 | dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n", |
8478c35a | 473 | intf_id, attr, selector, value, result); |
4aea5a15 | 474 | return -EREMOTEIO; |
19151c3d VK |
475 | } |
476 | ||
477 | return 0; | |
478 | } | |
19151c3d | 479 | |
3f0e9183 | 480 | int gb_svc_connection_create(struct gb_svc *svc, |
8478c35a CS |
481 | u8 intf1_id, u16 cport1_id, |
482 | u8 intf2_id, u16 cport2_id, | |
483 | u8 cport_flags) | |
30c6d9d7 AE |
484 | { |
485 | struct gb_svc_conn_create_request request; | |
486 | ||
487 | request.intf1_id = intf1_id; | |
2498050b | 488 | request.cport1_id = cpu_to_le16(cport1_id); |
30c6d9d7 | 489 | request.intf2_id = intf2_id; |
2498050b | 490 | request.cport2_id = cpu_to_le16(cport2_id); |
34145b60 | 491 | request.tc = 0; /* TC0 */ |
27f25c17 | 492 | request.flags = cport_flags; |
30c6d9d7 AE |
493 | |
494 | return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE, | |
495 | &request, sizeof(request), NULL, 0); | |
496 | } | |
497 | ||
3f0e9183 VK |
498 | void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, |
499 | u8 intf2_id, u16 cport2_id) | |
30c6d9d7 AE |
500 | { |
501 | struct gb_svc_conn_destroy_request request; | |
d9fcffff VK |
502 | struct gb_connection *connection = svc->connection; |
503 | int ret; | |
30c6d9d7 AE |
504 | |
505 | request.intf1_id = intf1_id; | |
2498050b | 506 | request.cport1_id = cpu_to_le16(cport1_id); |
30c6d9d7 | 507 | request.intf2_id = intf2_id; |
2498050b | 508 | request.cport2_id = cpu_to_le16(cport2_id); |
30c6d9d7 | 509 | |
d9fcffff VK |
510 | ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY, |
511 | &request, sizeof(request), NULL, 0); | |
684156a9 | 512 | if (ret) { |
2f3db927 | 513 | dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n", |
8478c35a | 514 | intf1_id, cport1_id, intf2_id, cport2_id, ret); |
684156a9 | 515 | } |
30c6d9d7 AE |
516 | } |
517 | ||
bb106852 | 518 | /* Creates bi-directional routes between the devices */ |
4d5f6218 | 519 | int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id, |
8478c35a | 520 | u8 intf2_id, u8 dev2_id) |
e08aaa49 PH |
521 | { |
522 | struct gb_svc_route_create_request request; | |
523 | ||
524 | request.intf1_id = intf1_id; | |
525 | request.dev1_id = dev1_id; | |
526 | request.intf2_id = intf2_id; | |
527 | request.dev2_id = dev2_id; | |
528 | ||
529 | return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE, | |
530 | &request, sizeof(request), NULL, 0); | |
531 | } | |
e08aaa49 | 532 | |
0a020570 | 533 | /* Destroys bi-directional routes between the devices */ |
4d5f6218 | 534 | void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id) |
0a020570 VK |
535 | { |
536 | struct gb_svc_route_destroy_request request; | |
537 | int ret; | |
538 | ||
539 | request.intf1_id = intf1_id; | |
540 | request.intf2_id = intf2_id; | |
541 | ||
542 | ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY, | |
543 | &request, sizeof(request), NULL, 0); | |
684156a9 | 544 | if (ret) { |
2f3db927 | 545 | dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n", |
8478c35a | 546 | intf1_id, intf2_id, ret); |
684156a9 | 547 | } |
0a020570 VK |
548 | } |
549 | ||
/*
 * Change the UniPro power mode of the link to interface @intf_id.
 * TX/RX series, mode, gear and lane counts plus optional L2 timer
 * configurations (@local/@remote, copied only when non-NULL) are sent
 * in a single GB_SVC_TYPE_INTF_SET_PWRM operation.
 *
 * Returns 0 on success, a negative errno on transport failure, or -EIO
 * when the SVC reports anything other than GB_SVC_SETPWRM_PWR_LOCAL.
 */
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
			       u8 tx_amplitude, u8 tx_hs_equalizer,
			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
			       u8 flags, u32 quirks,
			       struct gb_svc_l2_timer_cfg *local,
			       struct gb_svc_l2_timer_cfg *remote)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;
	u16 result_code;

	/* Zero the request so unset/reserved fields go out as 0. */
	memset(&request, 0, sizeof(request));

	request.intf_id = intf_id;
	request.hs_series = hs_series;
	request.tx_mode = tx_mode;
	request.tx_gear = tx_gear;
	request.tx_nlanes = tx_nlanes;
	request.tx_amplitude = tx_amplitude;
	request.tx_hs_equalizer = tx_hs_equalizer;
	request.rx_mode = rx_mode;
	request.rx_gear = rx_gear;
	request.rx_nlanes = rx_nlanes;
	request.flags = flags;
	request.quirks = cpu_to_le32(quirks);
	if (local)
		request.local_l2timerdata = *local;
	if (remote)
		request.remote_l2timerdata = *remote;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	/*
	 * NOTE(review): result_code is copied without le16_to_cpu(), unlike
	 * the DME helpers above — confirm the wire type of this field, as
	 * the comparison below assumes native byte order.
	 */
	result_code = response.result_code;
	if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
		dev_err(&svc->dev, "set power mode = %d\n", result_code);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
c7dc28ff DL |
598 | int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id) |
599 | { | |
600 | struct gb_svc_intf_set_pwrm_request request; | |
601 | struct gb_svc_intf_set_pwrm_response response; | |
602 | int ret; | |
603 | u16 result_code; | |
604 | ||
605 | memset(&request, 0, sizeof(request)); | |
606 | ||
607 | request.intf_id = intf_id; | |
608 | request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A; | |
609 | request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; | |
610 | request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; | |
611 | ||
612 | ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM, | |
613 | &request, sizeof(request), | |
614 | &response, sizeof(response)); | |
615 | if (ret < 0) { | |
616 | dev_err(&svc->dev, | |
617 | "failed to send set power mode operation to interface %u: %d\n", | |
618 | intf_id, ret); | |
619 | return ret; | |
620 | } | |
621 | ||
622 | result_code = response.result_code; | |
623 | if (result_code != GB_SVC_SETPWRM_PWR_OK) { | |
624 | dev_err(&svc->dev, | |
625 | "failed to hibernate the link for interface %u: %u\n", | |
626 | intf_id, result_code); | |
627 | return -EIO; | |
628 | } | |
629 | ||
630 | return 0; | |
631 | } | |
632 | ||
55ec09e8 GKH |
633 | int gb_svc_ping(struct gb_svc *svc) |
634 | { | |
839ac5b9 GKH |
635 | return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING, |
636 | NULL, 0, NULL, 0, | |
637 | GB_OPERATION_TIMEOUT_DEFAULT * 2); | |
55ec09e8 | 638 | } |
55ec09e8 | 639 | |
ead35460 VK |
640 | static int gb_svc_version_request(struct gb_operation *op) |
641 | { | |
642 | struct gb_connection *connection = op->connection; | |
0ec30632 | 643 | struct gb_svc *svc = gb_connection_get_data(connection); |
a2cf2e59 JH |
644 | struct gb_svc_version_request *request; |
645 | struct gb_svc_version_response *response; | |
ead35460 | 646 | |
55510843 | 647 | if (op->request->payload_size < sizeof(*request)) { |
684156a9 | 648 | dev_err(&svc->dev, "short version request (%zu < %zu)\n", |
8478c35a CS |
649 | op->request->payload_size, |
650 | sizeof(*request)); | |
55510843 JH |
651 | return -EINVAL; |
652 | } | |
653 | ||
cfb16906 | 654 | request = op->request->payload; |
ead35460 | 655 | |
cfb16906 | 656 | if (request->major > GB_SVC_VERSION_MAJOR) { |
2f3db927 | 657 | dev_warn(&svc->dev, "unsupported major version (%u > %u)\n", |
8478c35a | 658 | request->major, GB_SVC_VERSION_MAJOR); |
ead35460 VK |
659 | return -ENOTSUPP; |
660 | } | |
661 | ||
357de006 JH |
662 | svc->protocol_major = request->major; |
663 | svc->protocol_minor = request->minor; | |
3ea959e3 | 664 | |
684156a9 | 665 | if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL)) |
ead35460 | 666 | return -ENOMEM; |
ead35460 | 667 | |
cfb16906 | 668 | response = op->response->payload; |
357de006 JH |
669 | response->major = svc->protocol_major; |
670 | response->minor = svc->protocol_minor; | |
59832931 | 671 | |
ead35460 VK |
672 | return 0; |
673 | } | |
674 | ||
9504677c DL |
675 | static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf, |
676 | size_t len, loff_t *offset) | |
677 | { | |
461ab807 GK |
678 | struct svc_debugfs_pwrmon_rail *pwrmon_rails = |
679 | file_inode(file)->i_private; | |
9504677c DL |
680 | struct gb_svc *svc = pwrmon_rails->svc; |
681 | int ret, desc; | |
682 | u32 value; | |
683 | char buff[16]; | |
684 | ||
685 | ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, | |
686 | GB_SVC_PWRMON_TYPE_VOL, &value); | |
687 | if (ret) { | |
688 | dev_err(&svc->dev, | |
89f2df43 JH |
689 | "failed to get voltage sample %u: %d\n", |
690 | pwrmon_rails->id, ret); | |
9504677c DL |
691 | return ret; |
692 | } | |
693 | ||
694 | desc = scnprintf(buff, sizeof(buff), "%u\n", value); | |
695 | ||
696 | return simple_read_from_buffer(buf, len, offset, buff, desc); | |
697 | } | |
698 | ||
699 | static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf, | |
700 | size_t len, loff_t *offset) | |
701 | { | |
461ab807 GK |
702 | struct svc_debugfs_pwrmon_rail *pwrmon_rails = |
703 | file_inode(file)->i_private; | |
9504677c DL |
704 | struct gb_svc *svc = pwrmon_rails->svc; |
705 | int ret, desc; | |
706 | u32 value; | |
707 | char buff[16]; | |
708 | ||
709 | ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, | |
710 | GB_SVC_PWRMON_TYPE_CURR, &value); | |
711 | if (ret) { | |
712 | dev_err(&svc->dev, | |
89f2df43 JH |
713 | "failed to get current sample %u: %d\n", |
714 | pwrmon_rails->id, ret); | |
9504677c DL |
715 | return ret; |
716 | } | |
717 | ||
718 | desc = scnprintf(buff, sizeof(buff), "%u\n", value); | |
719 | ||
720 | return simple_read_from_buffer(buf, len, offset, buff, desc); | |
721 | } | |
722 | ||
723 | static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf, | |
724 | size_t len, loff_t *offset) | |
725 | { | |
461ab807 GK |
726 | struct svc_debugfs_pwrmon_rail *pwrmon_rails = |
727 | file_inode(file)->i_private; | |
9504677c DL |
728 | struct gb_svc *svc = pwrmon_rails->svc; |
729 | int ret, desc; | |
730 | u32 value; | |
731 | char buff[16]; | |
732 | ||
733 | ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id, | |
734 | GB_SVC_PWRMON_TYPE_PWR, &value); | |
735 | if (ret) { | |
89f2df43 JH |
736 | dev_err(&svc->dev, "failed to get power sample %u: %d\n", |
737 | pwrmon_rails->id, ret); | |
9504677c DL |
738 | return ret; |
739 | } | |
740 | ||
741 | desc = scnprintf(buff, sizeof(buff), "%u\n", value); | |
742 | ||
743 | return simple_read_from_buffer(buf, len, offset, buff, desc); | |
744 | } | |
745 | ||
746 | static const struct file_operations pwrmon_debugfs_voltage_fops = { | |
747 | .read = pwr_debugfs_voltage_read, | |
748 | }; | |
749 | ||
750 | static const struct file_operations pwrmon_debugfs_current_fops = { | |
751 | .read = pwr_debugfs_current_read, | |
752 | }; | |
753 | ||
754 | static const struct file_operations pwrmon_debugfs_power_fops = { | |
755 | .read = pwr_debugfs_power_read, | |
756 | }; | |
757 | ||
/*
 * Create the "pwrmon" debugfs hierarchy: one directory per power rail,
 * each exposing voltage_now/current_now/power_now sample files.
 * Best-effort: on any failure the partial tree is removed and the
 * function returns silently.
 */
static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
	int i;
	size_t bufsize;
	struct dentry *dent;
	struct gb_svc_pwrmon_rail_names_get_response *rail_names;
	u8 rail_count;

	dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
	if (IS_ERR_OR_NULL(dent))
		return;

	if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
		goto err_pwrmon_debugfs;

	/* Sanity-check the SVC-reported count before sizing buffers. */
	if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
		goto err_pwrmon_debugfs;

	/* Response header plus one fixed-size name slot per rail. */
	bufsize = sizeof(*rail_names) +
		GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;

	rail_names = kzalloc(bufsize, GFP_KERNEL);
	if (!rail_names)
		goto err_pwrmon_debugfs;

	/* Per-rail bookkeeping; freed in gb_svc_debugfs_exit(). */
	svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
				    GFP_KERNEL);
	if (!svc->pwrmon_rails)
		goto err_pwrmon_debugfs_free;

	if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
		goto err_pwrmon_debugfs_free;

	for (i = 0; i < rail_count; i++) {
		struct dentry *dir;
		struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
		char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

		snprintf(fname, sizeof(fname), "%s",
			 (char *)&rail_names->name[i]);

		rail->id = i;
		rail->svc = svc;

		dir = debugfs_create_dir(fname, dent);
		debugfs_create_file("voltage_now", 0444, dir, rail,
				    &pwrmon_debugfs_voltage_fops);
		debugfs_create_file("current_now", 0444, dir, rail,
				    &pwrmon_debugfs_current_fops);
		debugfs_create_file("power_now", 0444, dir, rail,
				    &pwrmon_debugfs_power_fops);
	}

	kfree(rail_names);
	return;

err_pwrmon_debugfs_free:
	kfree(rail_names);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
	debugfs_remove(dent);
}
12185197 | 823 | static void gb_svc_debugfs_init(struct gb_svc *svc) |
9504677c DL |
824 | { |
825 | svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev), | |
826 | gb_debugfs_get()); | |
12185197 | 827 | gb_svc_pwrmon_debugfs_init(svc); |
9504677c DL |
828 | } |
829 | ||
12185197 | 830 | static void gb_svc_debugfs_exit(struct gb_svc *svc) |
9504677c DL |
831 | { |
832 | debugfs_remove_recursive(svc->debugfs_dentry); | |
9983ea6b DL |
833 | kfree(svc->pwrmon_rails); |
834 | svc->pwrmon_rails = NULL; | |
9504677c DL |
835 | } |
836 | ||
/*
 * Handler for the incoming SVC hello request that follows the version
 * exchange: record the endo id and the AP interface id, register the
 * svc device, create the watchdog, set up debugfs and hand the rest of
 * the processing off to the deferred-request workqueue.
 */
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
			 op->request->payload_size,
			 sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		goto err_unregister_device;
	}

	gb_svc_debugfs_init(svc);

	/* Remaining hello handling runs from the deferred workqueue. */
	return gb_svc_queue_deferred_request(op);

err_unregister_device:
	/*
	 * NOTE(review): watchdog_destroy is called even though create
	 * failed — presumably it tolerates partial state; confirm.
	 */
	gb_svc_watchdog_destroy(svc);
	device_del(&svc->dev);
	return ret;
}
b482b0d6 | 877 | static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc, |
8478c35a | 878 | u8 intf_id) |
b482b0d6 JH |
879 | { |
880 | struct gb_host_device *hd = svc->hd; | |
881 | struct gb_module *module; | |
882 | size_t num_interfaces; | |
883 | u8 module_id; | |
884 | ||
885 | list_for_each_entry(module, &hd->modules, hd_node) { | |
886 | module_id = module->module_id; | |
887 | num_interfaces = module->num_interfaces; | |
888 | ||
889 | if (intf_id >= module_id && | |
8478c35a | 890 | intf_id < module_id + num_interfaces) { |
b482b0d6 JH |
891 | return module->interfaces[intf_id - module_id]; |
892 | } | |
893 | } | |
894 | ||
895 | return NULL; | |
896 | } | |
897 | ||
b15d97d7 JH |
898 | static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id) |
899 | { | |
900 | struct gb_host_device *hd = svc->hd; | |
901 | struct gb_module *module; | |
902 | ||
903 | list_for_each_entry(module, &hd->modules, hd_node) { | |
904 | if (module->module_id == module_id) | |
905 | return module; | |
906 | } | |
907 | ||
908 | return NULL; | |
909 | } | |
910 | ||
ee2f2074 MT |
/*
 * Deferred part of SVC hello handling, run from the SVC workqueue.
 * Reconfigures the AP-to-switch UniPro link power mode; a failure is
 * logged but otherwise non-fatal.
 */
static void gb_svc_process_hello_deferred(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	int ret;

	/*
	 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
	 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
	 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
	 * module.
	 *
	 * The code should be removed once SW-2217, Heuristic for UniPro
	 * Power Mode Changes is resolved.
	 */
	ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
					 GB_SVC_UNIPRO_HS_SERIES_A,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 GB_SVC_SMALL_AMPLITUDE,
					 GB_SVC_NO_DE_EMPHASIS,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 0, 0,
					 NULL, NULL);

	if (ret)
		dev_warn(&svc->dev,
			 "power mode change failed on AP to switch link: %d\n",
			 ret);
}
942 | ||
22bb9380 JH |
/*
 * Deferred handler for a module-inserted event: create and register a
 * gb_module for the reported module and add it to the host device's
 * module list.  Runs from the SVC workqueue.
 */
static void gb_svc_process_module_inserted(struct gb_operation *operation)
{
	struct gb_svc_module_inserted_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;
	u16 flags;
	int ret;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;
	num_interfaces = request->intf_count;
	flags = le16_to_cpu(request->flags);

	dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
		__func__, module_id, num_interfaces, flags);

	/* A module without a primary interface is logged but still added. */
	if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
		dev_warn(&svc->dev, "no primary interface detected on module %u\n",
			 module_id);
	}

	/* A duplicate insert event for a known module id is ignored. */
	module = gb_svc_module_lookup(svc, module_id);
	if (module) {
		dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
			 module_id);
		return;
	}

	module = gb_module_create(hd, module_id, num_interfaces);
	if (!module) {
		dev_err(&svc->dev, "failed to create module\n");
		return;
	}

	ret = gb_module_add(module);
	if (ret) {
		gb_module_put(module);
		return;
	}

	list_add(&module->hd_node, &hd->modules);
}
990 | ||
/*
 * Deferred handler for a module-removed event: look up the module by
 * its primary interface id, mark it disconnected and tear it down.
 * Runs from the SVC workqueue.
 */
static void gb_svc_process_module_removed(struct gb_operation *operation)
{
	struct gb_svc_module_removed_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_module *module;
	u8 module_id;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);

	module = gb_svc_module_lookup(svc, module_id);
	if (!module) {
		dev_warn(&svc->dev, "unexpected module-removed event %u\n",
			 module_id);
		return;
	}

	/*
	 * Flag the module as gone before tearing it down — presumably so
	 * teardown avoids talking to the absent hardware; confirm in
	 * gb_module_del().
	 */
	module->disconnected = true;

	gb_module_del(module);
	list_del(&module->hd_node);
	gb_module_put(module);
}
1018 | ||
57fa2de1 GD |
/*
 * Deferred handler for an interface-oops event: disable and deactivate
 * the interface that reported the oops.  Runs from the SVC workqueue.
 */
static void gb_svc_process_intf_oops(struct gb_operation *operation)
{
	struct gb_svc_intf_oops_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u8 reason;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	reason = request->reason;

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
			 intf_id);
		return;
	}

	dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
		 intf_id, reason);

	/* Serialize state changes with other users of intf->mutex. */
	mutex_lock(&intf->mutex);
	intf->disconnected = true;
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}
1049 | ||
b482b0d6 JH |
/*
 * Deferred handler for an interface mailbox event: decode the result
 * code and mailbox value and forward them to the interface core.
 * Runs from the SVC workqueue.
 */
static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
{
	struct gb_svc_intf_mailbox_event_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u16 result_code;
	u32 mailbox;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	result_code = le16_to_cpu(request->result_code);
	mailbox = le32_to_cpu(request->mailbox);

	dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
		__func__, intf_id, result_code, mailbox);

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
		return;
	}

	gb_interface_mailbox_event(intf, result_code, mailbox);
}
1077 | ||
9ae4109e JH |
/*
 * Workqueue callback dispatching a deferred SVC request to its
 * type-specific handler, then dropping the operation reference and the
 * wrapper taken/allocated by gb_svc_queue_deferred_request().
 */
static void gb_svc_process_deferred_request(struct work_struct *work)
{
	struct gb_svc_deferred_request *dr;
	struct gb_operation *operation;
	struct gb_svc *svc;
	u8 type;

	dr = container_of(work, struct gb_svc_deferred_request, work);
	operation = dr->operation;
	svc = gb_connection_get_data(operation->connection);
	type = operation->request->header->type;

	switch (type) {
	case GB_SVC_TYPE_SVC_HELLO:
		gb_svc_process_hello_deferred(operation);
		break;
	case GB_SVC_TYPE_MODULE_INSERTED:
		gb_svc_process_module_inserted(operation);
		break;
	case GB_SVC_TYPE_MODULE_REMOVED:
		gb_svc_process_module_removed(operation);
		break;
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		gb_svc_process_intf_mailbox_event(operation);
		break;
	case GB_SVC_TYPE_INTF_OOPS:
		gb_svc_process_intf_oops(operation);
		break;
	default:
		dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
	}

	/* Balance gb_operation_get() and free the deferred wrapper. */
	gb_operation_put(operation);
	kfree(dr);
}
1113 | ||
1114 | static int gb_svc_queue_deferred_request(struct gb_operation *operation) | |
1115 | { | |
0ec30632 | 1116 | struct gb_svc *svc = gb_connection_get_data(operation->connection); |
9ae4109e JH |
1117 | struct gb_svc_deferred_request *dr; |
1118 | ||
1119 | dr = kmalloc(sizeof(*dr), GFP_KERNEL); | |
1120 | if (!dr) | |
1121 | return -ENOMEM; | |
1122 | ||
1123 | gb_operation_get(operation); | |
1124 | ||
1125 | dr->operation = operation; | |
1126 | INIT_WORK(&dr->work, gb_svc_process_deferred_request); | |
1127 | ||
3e48acac | 1128 | queue_work(svc->wq, &dr->work); |
9ae4109e JH |
1129 | |
1130 | return 0; | |
067906f6 | 1131 | } |
ead35460 | 1132 | |
30c6d9d7 AE |
/*
 * Handle an interface-reset request.  Currently only validates the
 * payload size; the actual reset is not implemented yet (see FIXME).
 */
static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
			 request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	/* FIXME Reset the interface here */

	return 0;
}
1150 | ||
22bb9380 JH |
1151 | static int gb_svc_module_inserted_recv(struct gb_operation *op) |
1152 | { | |
1153 | struct gb_svc *svc = gb_connection_get_data(op->connection); | |
1154 | struct gb_svc_module_inserted_request *request; | |
1155 | ||
1156 | if (op->request->payload_size < sizeof(*request)) { | |
1157 | dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n", | |
8478c35a | 1158 | op->request->payload_size, sizeof(*request)); |
22bb9380 JH |
1159 | return -EINVAL; |
1160 | } | |
1161 | ||
1162 | request = op->request->payload; | |
1163 | ||
1164 | dev_dbg(&svc->dev, "%s - id = %u\n", __func__, | |
8478c35a | 1165 | request->primary_intf_id); |
22bb9380 JH |
1166 | |
1167 | return gb_svc_queue_deferred_request(op); | |
1168 | } | |
1169 | ||
1170 | static int gb_svc_module_removed_recv(struct gb_operation *op) | |
1171 | { | |
1172 | struct gb_svc *svc = gb_connection_get_data(op->connection); | |
1173 | struct gb_svc_module_removed_request *request; | |
1174 | ||
1175 | if (op->request->payload_size < sizeof(*request)) { | |
1176 | dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n", | |
8478c35a | 1177 | op->request->payload_size, sizeof(*request)); |
22bb9380 JH |
1178 | return -EINVAL; |
1179 | } | |
1180 | ||
1181 | request = op->request->payload; | |
1182 | ||
1183 | dev_dbg(&svc->dev, "%s - id = %u\n", __func__, | |
8478c35a | 1184 | request->primary_intf_id); |
22bb9380 JH |
1185 | |
1186 | return gb_svc_queue_deferred_request(op); | |
1187 | } | |
1188 | ||
57fa2de1 GD |
1189 | static int gb_svc_intf_oops_recv(struct gb_operation *op) |
1190 | { | |
1191 | struct gb_svc *svc = gb_connection_get_data(op->connection); | |
1192 | struct gb_svc_intf_oops_request *request; | |
1193 | ||
1194 | if (op->request->payload_size < sizeof(*request)) { | |
1195 | dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n", | |
1196 | op->request->payload_size, sizeof(*request)); | |
1197 | return -EINVAL; | |
1198 | } | |
1199 | ||
1200 | return gb_svc_queue_deferred_request(op); | |
1201 | } | |
1202 | ||
b482b0d6 JH |
1203 | static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op) |
1204 | { | |
1205 | struct gb_svc *svc = gb_connection_get_data(op->connection); | |
1206 | struct gb_svc_intf_mailbox_event_request *request; | |
1207 | ||
1208 | if (op->request->payload_size < sizeof(*request)) { | |
1209 | dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n", | |
8478c35a | 1210 | op->request->payload_size, sizeof(*request)); |
b482b0d6 JH |
1211 | return -EINVAL; |
1212 | } | |
1213 | ||
1214 | request = op->request->payload; | |
1215 | ||
1216 | dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id); | |
1217 | ||
1218 | return gb_svc_queue_deferred_request(op); | |
1219 | } | |
1220 | ||
/*
 * Top-level request handler for the SVC connection: enforces the
 * initial request ordering via svc->state and dispatches each request
 * type to its handler.  Returns 0 on success or a negative errno.
 */
static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially) and
	 * below code takes care of enforcing that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but the earlier two.
	 *
	 * Incoming requests are guaranteed to be serialized and so we don't
	 * need to protect 'state' for any races.
	 */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
			 type, svc->state);
		return ret;
	}

	/* Dispatch; state only advances when the handler succeeds. */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_MODULE_INSERTED:
		return gb_svc_module_inserted_recv(op);
	case GB_SVC_TYPE_MODULE_REMOVED:
		return gb_svc_module_removed_recv(op);
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		return gb_svc_intf_mailbox_event_recv(op);
	case GB_SVC_TYPE_INTF_OOPS:
		return gb_svc_intf_oops_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}
1285 | ||
efe6ef76 JH |
/*
 * Driver-core release callback for the SVC device: frees everything
 * allocated by gb_svc_create() once the last reference is dropped.
 */
static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	/* The connection may not exist if gb_svc_create() failed midway. */
	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc);
}
1296 | ||
/* Device type for the SVC device; release frees the gb_svc allocation. */
struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
1301 | ||
7adeaae7 | 1302 | struct gb_svc *gb_svc_create(struct gb_host_device *hd) |
30c6d9d7 AE |
1303 | { |
1304 | struct gb_svc *svc; | |
30c6d9d7 AE |
1305 | |
1306 | svc = kzalloc(sizeof(*svc), GFP_KERNEL); | |
1307 | if (!svc) | |
7adeaae7 | 1308 | return NULL; |
30c6d9d7 | 1309 | |
3e48acac JH |
1310 | svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev)); |
1311 | if (!svc->wq) { | |
1312 | kfree(svc); | |
7adeaae7 | 1313 | return NULL; |
3e48acac JH |
1314 | } |
1315 | ||
efe6ef76 JH |
1316 | svc->dev.parent = &hd->dev; |
1317 | svc->dev.bus = &greybus_bus_type; | |
1318 | svc->dev.type = &greybus_svc_type; | |
66069fb0 | 1319 | svc->dev.groups = svc_groups; |
efe6ef76 JH |
1320 | svc->dev.dma_mask = svc->dev.parent->dma_mask; |
1321 | device_initialize(&svc->dev); | |
1322 | ||
1323 | dev_set_name(&svc->dev, "%d-svc", hd->bus_id); | |
1324 | ||
6106e51b | 1325 | ida_init(&svc->device_id_map); |
3ccb1600 | 1326 | svc->state = GB_SVC_STATE_RESET; |
f0960d05 | 1327 | svc->hd = hd; |
d3d44840 | 1328 | |
f7ee081e | 1329 | svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID, |
8478c35a | 1330 | gb_svc_request_handler); |
24e094d6 JH |
1331 | if (IS_ERR(svc->connection)) { |
1332 | dev_err(&svc->dev, "failed to create connection: %ld\n", | |
8478c35a | 1333 | PTR_ERR(svc->connection)); |
1beb1fae | 1334 | goto err_put_device; |
7adeaae7 JH |
1335 | } |
1336 | ||
0ec30632 | 1337 | gb_connection_set_data(svc->connection, svc); |
efe6ef76 | 1338 | |
7adeaae7 | 1339 | return svc; |
ebe99d61 | 1340 | |
ebe99d61 RMS |
1341 | err_put_device: |
1342 | put_device(&svc->dev); | |
1343 | return NULL; | |
30c6d9d7 AE |
1344 | } |
1345 | ||
7adeaae7 | 1346 | int gb_svc_add(struct gb_svc *svc) |
30c6d9d7 | 1347 | { |
7adeaae7 | 1348 | int ret; |
30c6d9d7 | 1349 | |
7adeaae7 JH |
1350 | /* |
1351 | * The SVC protocol is currently driven by the SVC, so the SVC device | |
1352 | * is added from the connection request handler when enough | |
1353 | * information has been received. | |
1354 | */ | |
f7ee081e | 1355 | ret = gb_connection_enable(svc->connection); |
7adeaae7 JH |
1356 | if (ret) |
1357 | return ret; | |
1358 | ||
1359 | return 0; | |
1360 | } | |
1361 | ||
b15d97d7 | 1362 | static void gb_svc_remove_modules(struct gb_svc *svc) |
66d674cf | 1363 | { |
b15d97d7 JH |
1364 | struct gb_host_device *hd = svc->hd; |
1365 | struct gb_module *module, *tmp; | |
66d674cf | 1366 | |
b15d97d7 JH |
1367 | list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) { |
1368 | gb_module_del(module); | |
1369 | list_del(&module->hd_node); | |
1370 | gb_module_put(module); | |
629c0d00 | 1371 | } |
66d674cf JH |
1372 | } |
1373 | ||
7adeaae7 JH |
/*
 * Quiesce and unregister the SVC: stop incoming requests, remove the
 * SVC device (if it was registered from the hello handler), flush
 * deferred work, remove all modules and finally disable the connection
 * completely.
 */
void gb_svc_del(struct gb_svc *svc)
{
	/* Stop accepting new incoming requests while tearing down. */
	gb_connection_disable_rx(svc->connection);

	/*
	 * The SVC device may have been registered from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		device_del(&svc->dev);
	}

	/* Wait for any queued deferred requests to finish. */
	flush_workqueue(svc->wq);

	gb_svc_remove_modules(svc);

	gb_connection_disable(svc->connection);
}
efe6ef76 | 1393 | |
7adeaae7 JH |
/* Drop a reference on the SVC device; freed via gb_svc_release(). */
void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}