// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 */

#include <common.h>
#include <cpu_func.h>
#include <asm/io.h>
#include <asm/arch-tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64

/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx_channel.state to indicate its synchronization state.
 */
enum ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in
	 * our rx_channel.
	 */
	ivc_state_established = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint
	 * is allowed to clear the counters it owns asynchronously with
	 * respect to the current endpoint. Therefore, the current endpoint is
	 * no longer allowed to communicate.
	 */
	ivc_state_sync,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the
	 * ack state. If the remote endpoint observes us in the ack state, it
	 * can return to the established state once it has cleared its
	 * counters.
	 */
	ivc_state_ack
};

/*
 * This structure is divided into two cache-aligned parts: the first is only
 * written through the tx_channel pointer, while the second is only written
 * through the rx_channel pointer. This delineates ownership of the cache
 * lines, which is critical to performance and necessary in non-cache-coherent
 * implementations.
 */
struct tegra_ivc_channel_header {
	union {
		/* fields owned by the transmitting end */
		struct {
			uint32_t w_count;
			uint32_t state;
		};
		uint8_t w_align[TEGRA_IVC_ALIGN];
	};
	union {
		/* fields owned by the receiving end */
		uint32_t r_count;
		uint8_t r_align[TEGRA_IVC_ALIGN];
	};
};
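
/*
 * Illustrative compile-time restatement of the layout described above,
 * assuming a C11 toolchain; check_ivc_params() enforces the weaker
 * alignment-only form of these invariants at run time.
 */
_Static_assert(offsetof(struct tegra_ivc_channel_header, r_count) ==
	       TEGRA_IVC_ALIGN, "receiver fields must start a new cache line");
_Static_assert(sizeof(struct tegra_ivc_channel_header) ==
	       2 * TEGRA_IVC_ALIGN, "header must span exactly two cache lines");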

static inline void tegra_ivc_invalidate_counter(struct tegra_ivc *ivc,
					struct tegra_ivc_channel_header *h,
					ulong offset)
{
	ulong base = ((ulong)h) + offset;
	invalidate_dcache_range(base, base + TEGRA_IVC_ALIGN);
}

static inline void tegra_ivc_flush_counter(struct tegra_ivc *ivc,
					   struct tegra_ivc_channel_header *h,
					   ulong offset)
{
	ulong base = ((ulong)h) + offset;
	flush_dcache_range(base, base + TEGRA_IVC_ALIGN);
}

static inline ulong tegra_ivc_frame_addr(struct tegra_ivc *ivc,
					 struct tegra_ivc_channel_header *h,
					 uint32_t frame)
{
	BUG_ON(frame >= ivc->nframes);

	return ((ulong)h) + sizeof(struct tegra_ivc_channel_header) +
	       (ivc->frame_size * frame);
}

static inline void *tegra_ivc_frame_pointer(struct tegra_ivc *ivc,
					    struct tegra_ivc_channel_header *ch,
					    uint32_t frame)
{
	return (void *)tegra_ivc_frame_addr(ivc, ch, frame);
}

static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
					struct tegra_ivc_channel_header *h,
					unsigned frame)
{
	ulong base = tegra_ivc_frame_addr(ivc, h, frame);
	invalidate_dcache_range(base, base + ivc->frame_size);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
					 struct tegra_ivc_channel_header *h,
					 unsigned frame)
{
	ulong base = tegra_ivc_frame_addr(ivc, h, frame);
	flush_dcache_range(base, base + ivc->frame_size);
}

static inline int tegra_ivc_channel_empty(struct tegra_ivc *ivc,
					  struct tegra_ivc_channel_header *ch)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so create snapshots with ACCESS_ONCE() to
	 * ensure that these checks use the same values.
	 */
	uint32_t w_count = ACCESS_ONCE(ch->w_count);
	uint32_t r_count = ACCESS_ONCE(ch->r_count);

	/*
	 * Perform an over-full check to prevent denial of service attacks
	 * where a server could be easily fooled into believing that there's
	 * an extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't empty, this is an invalid case caused by
	 * a potentially malicious peer, so returning empty is safer, because
	 * it gives the impression that the channel has gone silent.
	 */
	if (w_count - r_count > ivc->nframes)
		return 1;

	return w_count == r_count;
}
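
/*
 * Worked example (illustrative): the counters are free-running and wrap
 * modulo 2^32, so only their difference is meaningful. With nframes == 4:
 *
 *	w_count == 7	r_count == 5		-> 2 frames pending
 *	w_count == 0x2	r_count == 0xfffffffe	-> 0x2 - 0xfffffffe wraps
 *						   to 4: exactly full
 *	w_count == 9	r_count == 2		-> difference 7 > nframes:
 *						   over-full, reported as
 *						   empty above
 */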

static inline int tegra_ivc_channel_full(struct tegra_ivc *ivc,
					 struct tegra_ivc_channel_header *ch)
{
	/*
	 * Invalid cases where the counters indicate that the queue is over
	 * capacity also appear full.
	 */
	return (ACCESS_ONCE(ch->w_count) - ACCESS_ONCE(ch->r_count)) >=
	       ivc->nframes;
}

static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
	ACCESS_ONCE(ivc->rx_channel->r_count) =
			ACCESS_ONCE(ivc->rx_channel->r_count) + 1;

	if (ivc->r_pos == ivc->nframes - 1)
		ivc->r_pos = 0;
	else
		ivc->r_pos++;
}

static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
	ACCESS_ONCE(ivc->tx_channel->w_count) =
			ACCESS_ONCE(ivc->tx_channel->w_count) + 1;

	if (ivc->w_pos == ivc->nframes - 1)
		ivc->w_pos = 0;
	else
		ivc->w_pos++;
}
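
/*
 * Note: the free-running counters and the cached positions advance in
 * lock-step (and are reset together during the handshake), so w_pos and
 * r_pos cache the frame index that would otherwise have to be derived from
 * the counters with a modulo operation on every frame access.
 */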

static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	ulong offset;

	/*
	 * tx_channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx_channel->state to ivc_state_ack is
	 * not allowed.
	 */
	if (ivc->tx_channel->state != ivc_state_established)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses
	 * to an IVC channel by checking the old queue pointers first.
	 * Synchronization is only necessary when these pointers indicate
	 * empty or full.
	 */
	if (!tegra_ivc_channel_empty(ivc, ivc->rx_channel))
		return 0;

	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_invalidate_counter(ivc, ivc->rx_channel, offset);
	return tegra_ivc_channel_empty(ivc, ivc->rx_channel) ? -ENOMEM : 0;
}

static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	ulong offset;

	if (ivc->tx_channel->state != ivc_state_established)
		return -ECONNRESET;

	if (!tegra_ivc_channel_full(ivc, ivc->tx_channel))
		return 0;

	offset = offsetof(struct tegra_ivc_channel_header, r_count);
	tegra_ivc_invalidate_counter(ivc, ivc->tx_channel, offset);
	return tegra_ivc_channel_full(ivc, ivc->tx_channel) ? -ENOMEM : 0;
}

static inline uint32_t tegra_ivc_channel_avail_count(struct tegra_ivc *ivc,
					struct tegra_ivc_channel_header *ch)
{
	/*
	 * This function isn't expected to be used in scenarios where an
	 * over-full situation can lead to denial of service attacks. See the
	 * comment in tegra_ivc_channel_empty() for an explanation about
	 * special over-full considerations.
	 */
	return ACCESS_ONCE(ch->w_count) - ACCESS_ONCE(ch->r_count);
}

int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, void **frame)
{
	int result = tegra_ivc_check_read(ivc);
	if (result < 0)
		return result;

	/*
	 * Order observation of w_pos potentially indicating new data before
	 * data read.
	 */
	mb();

	tegra_ivc_invalidate_frame(ivc, ivc->rx_channel, ivc->r_pos);
	*frame = tegra_ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);

	return 0;
}
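
/*
 * Example (illustrative sketch, not part of the driver): a receiver
 * consumes one frame by mapping the next slot, reading it, and then
 * releasing it. "consume" is a hypothetical caller function; the frame
 * pointer must not be used after tegra_ivc_read_advance().
 *
 *	void *frame;
 *
 *	if (tegra_ivc_read_get_next_frame(ivc, &frame))
 *		return;				// empty or not established
 *	consume(frame, ivc->frame_size);
 *	tegra_ivc_read_advance(ivc);		// frees the slot, may notify
 */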

int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	ulong offset;
	int result;

	/*
	 * No read barriers or synchronization here: the caller is expected
	 * to have already observed the channel non-empty. This check is just
	 * to catch programming errors.
	 */
	result = tegra_ivc_check_read(ivc);
	if (result)
		return result;

	tegra_ivc_advance_rx(ivc);
	offset = offsetof(struct tegra_ivc_channel_header, r_count);
	tegra_ivc_flush_counter(ivc, ivc->rx_channel, offset);

	/*
	 * Ensure our write to r_pos occurs before our read from w_pos.
	 */
	mb();

	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_invalidate_counter(ivc, ivc->rx_channel, offset);

	if (tegra_ivc_channel_avail_count(ivc, ivc->rx_channel) ==
	    ivc->nframes - 1)
		ivc->notify(ivc);

	return 0;
}

int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, void **frame)
{
	int result = tegra_ivc_check_write(ivc);
	if (result)
		return result;

	*frame = tegra_ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);

	return 0;
}

int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	ulong offset;
	int result;

	result = tegra_ivc_check_write(ivc);
	if (result)
		return result;

	tegra_ivc_flush_frame(ivc, ivc->tx_channel, ivc->w_pos);

	/*
	 * Order any possible stores to the frame before update of w_pos.
	 */
	mb();

	tegra_ivc_advance_tx(ivc);
	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

	/*
	 * Ensure our write to w_pos occurs before our read from r_pos.
	 */
	mb();

	offset = offsetof(struct tegra_ivc_channel_header, r_count);
	tegra_ivc_invalidate_counter(ivc, ivc->tx_channel, offset);

	if (tegra_ivc_channel_avail_count(ivc, ivc->tx_channel) == 1)
		ivc->notify(ivc);

	return 0;
}
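
/*
 * Example (illustrative sketch, not part of the driver): a transmitter
 * mirrors the read path; it claims the next free slot, fills it, and
 * advances. "msg" and "len" are hypothetical, with len no greater than
 * ivc->frame_size.
 *
 *	void *frame;
 *
 *	if (tegra_ivc_write_get_next_frame(ivc, &frame))
 *		return;			// full or not established
 *	memcpy(frame, msg, len);
 *	tegra_ivc_write_advance(ivc);	// flushes, bumps w_count, may notify
 */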

/*
 * ===============================================================
 *  IVC State Transition Table - see tegra_ivc_channel_notified()
 * ===============================================================
 *
 *	local	remote	action
 *	-----	------	-----------------------------------
 *	SYNC	EST	<none>
 *	SYNC	ACK	reset counters; move to EST; notify
 *	SYNC	SYNC	reset counters; move to ACK; notify
 *	ACK	EST	move to EST; notify
 *	ACK	ACK	move to EST; notify
 *	ACK	SYNC	reset counters; move to ACK; notify
 *	EST	EST	<none>
 *	EST	ACK	<none>
 *	EST	SYNC	reset counters; move to ACK; notify
 *
 * ===============================================================
 */
int tegra_ivc_channel_notified(struct tegra_ivc *ivc)
{
	ulong offset;
	enum ivc_state peer_state;

	/* Copy the receiver's state out of shared memory. */
	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_invalidate_counter(ivc, ivc->rx_channel, offset);
	peer_state = ACCESS_ONCE(ivc->rx_channel->state);

	if (peer_state == ivc_state_sync) {
		/*
		 * Order observation of ivc_state_sync before stores clearing
		 * tx_channel.
		 */
		mb();

		/*
		 * Reset tx_channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx_channel->w_count = 0;
		ivc->rx_channel->r_count = 0;

		ivc->w_pos = 0;
		ivc->r_pos = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		mb();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		ivc->tx_channel->state = ivc_state_ack;
		offset = offsetof(struct tegra_ivc_channel_header, w_count);
		tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);
	} else if (ivc->tx_channel->state == ivc_state_sync &&
		   peer_state == ivc_state_ack) {
		/*
		 * Order observation of ivc_state_sync before stores clearing
		 * tx_channel.
		 */
		mb();

		/*
		 * Reset tx_channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx_channel->w_count = 0;
		ivc->rx_channel->r_count = 0;

		ivc->w_pos = 0;
		ivc->r_pos = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		mb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx_channel->state = ivc_state_established;
		offset = offsetof(struct tegra_ivc_channel_header, w_count);
		tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);
	} else if (ivc->tx_channel->state == ivc_state_ack) {
		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx_channel.
		 */
		mb();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx_channel->state = ivc_state_established;
		offset = offsetof(struct tegra_ivc_channel_header, w_count);
		tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);
	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting
		 * for the remote end to catch up with our current state.
		 * Refer to the diagram in "IVC State Transition Table" above.
		 */
	}

	if (ivc->tx_channel->state != ivc_state_established)
		return -EAGAIN;

	return 0;
}
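
/*
 * Example (illustrative): a client typically drives the handshake by
 * resetting the channel (see tegra_ivc_channel_reset() below) and then
 * calling tegra_ivc_channel_notified() from its notification handler, or
 * a polling loop, until it stops returning -EAGAIN:
 *
 *	tegra_ivc_channel_reset(ivc);
 *	while (tegra_ivc_channel_notified(ivc) == -EAGAIN)
 *		;	// wait for the remote end to advance its state
 */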

void tegra_ivc_channel_reset(struct tegra_ivc *ivc)
{
	ulong offset;

	ivc->tx_channel->state = ivc_state_sync;
	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);
	ivc->notify(ivc);
}

static int check_ivc_params(ulong qbase1, ulong qbase2, uint32_t nframes,
			    uint32_t frame_size)
{
	int ret = 0;

	BUG_ON(offsetof(struct tegra_ivc_channel_header, w_count) &
	       (TEGRA_IVC_ALIGN - 1));
	BUG_ON(offsetof(struct tegra_ivc_channel_header, r_count) &
	       (TEGRA_IVC_ALIGN - 1));
	BUG_ON(sizeof(struct tegra_ivc_channel_header) &
	       (TEGRA_IVC_ALIGN - 1));

	if ((uint64_t)nframes * (uint64_t)frame_size >= 0x100000000) {
		pr_err("tegra_ivc: nframes * frame_size overflows\n");
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if ((qbase1 & (TEGRA_IVC_ALIGN - 1)) ||
	    (qbase2 & (TEGRA_IVC_ALIGN - 1))) {
		pr_err("tegra_ivc: channel start not aligned\n");
		return -EINVAL;
	}

	if (frame_size & (TEGRA_IVC_ALIGN - 1)) {
		pr_err("tegra_ivc: frame size not adequately aligned\n");
		return -EINVAL;
	}

	if (qbase1 < qbase2) {
		if (qbase1 + frame_size * nframes > qbase2)
			ret = -EINVAL;
	} else {
		if (qbase2 + frame_size * nframes > qbase1)
			ret = -EINVAL;
	}

	if (ret) {
		pr_err("tegra_ivc: queue regions overlap\n");
		return ret;
	}

	return 0;
}

int tegra_ivc_init(struct tegra_ivc *ivc, ulong rx_base, ulong tx_base,
		   uint32_t nframes, uint32_t frame_size,
		   void (*notify)(struct tegra_ivc *))
{
	int ret;

	if (!ivc)
		return -EINVAL;

	ret = check_ivc_params(rx_base, tx_base, nframes, frame_size);
	if (ret)
		return ret;

	ivc->rx_channel = (struct tegra_ivc_channel_header *)rx_base;
	ivc->tx_channel = (struct tegra_ivc_channel_header *)tx_base;
	ivc->w_pos = 0;
	ivc->r_pos = 0;
	ivc->nframes = nframes;
	ivc->frame_size = frame_size;
	ivc->notify = notify;

	return 0;
}
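
/*
 * Example (illustrative sketch, not part of the driver): bringing up a
 * channel with 16 frames of 128 bytes each. "rx_base", "tx_base" and
 * "doorbell" are hypothetical; each region must be 64-byte aligned, large
 * enough for sizeof(struct tegra_ivc_channel_header) plus 16 * 128 bytes
 * of frames, and the two regions must not overlap.
 *
 *	static void doorbell(struct tegra_ivc *ivc)
 *	{
 *		// ring the peer's mailbox/interrupt here
 *	}
 *
 *	struct tegra_ivc ivc;
 *
 *	if (tegra_ivc_init(&ivc, rx_base, tx_base, 16, 128, doorbell))
 *		return;
 *	tegra_ivc_channel_reset(&ivc);
 *	while (tegra_ivc_channel_notified(&ivc) == -EAGAIN)
 *		;	// poll until both ends reach the established state
 */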