// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <[email protected]>
 *   Hank Janssen  <[email protected]>
 *   K. Y. Srinivasan <[email protected]>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/io.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if they receive interrupts when not expecting them.
 * The host expects interrupts only when the ring transitions from empty
 * to non-empty (or full to non-full on the guest-to-host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
		++channel->intr_out_empty;
		vmbus_setevent(channel);
	}
}
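
/*
 * Illustrative note: if both indices were, say, 100 before this write
 * (ring empty), old_write still equals the host's read_index here, so
 * the ring just went from empty to non-empty and the host is signaled.
 * If read_index differs from old_write, the ring was already non-empty
 * and the host is assumed to still be draining it, so no signal is sent.
 */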

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/* Get the read and write indices as u64 of the specified ring buffer. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assumes there is enough room. Handles wrap-around in dest case only!!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

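	/*
	 * The copy may run past ring_buffer_size: the data pages are
	 * mapped twice back to back by hv_ringbuffer_init(), so writes
	 * beyond the end of the first mapping land in the same physical
	 * pages at the start of the ring.  Only the returned offset is
	 * wrapped explicitly below.
	 */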
	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read and to write
 * for the specified ring buffer.
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

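	/*
	 * Example (illustrative values only): with dsize = 4096,
	 * read_loc = 1000 and write_loc = 1500, the writer may add
	 * 4096 - (1500 - 1000) = 3596 bytes and the reader may
	 * consume 4096 - 3596 = 500 bytes.
	 */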
	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}

/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	mutex_lock(&ring_info->ring_buffer_mutex);

	if (!ring_info->ring_buffer) {
		mutex_unlock(&ring_info->ring_buffer_mutex);
		return -EINVAL;
	}

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);
	debug_info->bytes_avail_toread = bytes_avail_toread;
	debug_info->bytes_avail_towrite = bytes_avail_towrite;
	debug_info->current_read_index = ring_info->ring_buffer->read_index;
	debug_info->current_write_index = ring_info->ring_buffer->write_index;
	debug_info->current_interrupt_mask
		= ring_info->ring_buffer->interrupt_mask;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize a channel's ring buffer info mutex locks */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
{
	mutex_init(&channel->inbound.ring_buffer_mutex);
	mutex_init(&channel->outbound.ring_buffer_mutex);
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt, u32 max_pkt_size)
{
	struct page **pages_wraparound;
	int i;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1,
				   sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] =
			&pages[i % (page_cnt - 1) + 1];
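	/*
	 * Illustration (assumed page_cnt = 4): the array built above is
	 * { hdr, d1, d2, d3, d1, d2, d3 }, i.e. the header page once and
	 * the data pages twice, so the vmap() below produces a virtually
	 * contiguous double mapping of the ring data area.
	 */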

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP,
		     pgprot_decrypted(PAGE_KERNEL));

	kfree(pages_wraparound);
	if (!ring_info->ring_buffer)
		return -ENOMEM;

	/*
	 * Ensure the header page is zeroed since the
	 * encryption status may have changed.
	 */
	memset(ring_info->ring_buffer, 0, HV_HYP_PAGE_SIZE);

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);
	ring_info->priv_read_index = 0;

	/* Initialize buffer that holds copies of incoming packets */
	if (max_pkt_size) {
		ring_info->pkt_buffer = kzalloc(max_pkt_size, GFP_KERNEL);
		if (!ring_info->pkt_buffer)
			return -ENOMEM;
		ring_info->pkt_buffer_size = max_pkt_size;
	}

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	mutex_lock(&ring_info->ring_buffer_mutex);
	vunmap(ring_info->ring_buffer);
	ring_info->ring_buffer = NULL;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	kfree(ring_info->pkt_buffer);
	ring_info->pkt_buffer = NULL;
	ring_info->pkt_buffer_size = 0;
}

/*
 * Check if the ring buffer spinlock is available to take or not; used in
 * atomic contexts, like the panic path (see the Hyper-V framebuffer driver).
 */

bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rinfo = &channel->outbound;

	return spin_is_locked(&rinfo->ring_lock);
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_spinlock_busy);

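/*
 * Illustrative note: drivers normally do not call hv_ringbuffer_write()
 * directly; helpers such as vmbus_sendpacket() in channel.c build the
 * kvec list (packet descriptor, payload, alignment padding) and pass it
 * here together with the request ID.
 */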
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count,
			u64 requestid, u64 *trans_id)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;
	struct vmpacket_descriptor *desc = kv_list[0].iov_base;
	u64 __trans_id, rqst_id = VMBUS_NO_RQSTOR;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we would think the ring buffer
	 * is empty since read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		++channel->out_full_total;

		if (!channel->out_full_flag) {
			++channel->out_full_first;
			channel->out_full_flag = true;
		}

		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	channel->out_full_flag = false;

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/*
	 * Allocate the request ID after the data has been copied into the
	 * ring buffer. Once this request ID is allocated, the completion
	 * path could find the data and free it.
	 */

	if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
		if (channel->next_request_id_callback != NULL) {
			rqst_id = channel->next_request_id_callback(channel, requestid);
			if (rqst_id == VMBUS_RQST_ERROR) {
				spin_unlock_irqrestore(&outring_info->ring_lock, flags);
				return -EAGAIN;
			}
		}
	}
	desc = hv_get_ring_buffer(outring_info) + old_write;
	__trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;
	/*
	 * Ensure the compiler doesn't generate code that reads the value of
	 * the transaction ID from the ring buffer, which is shared with the
	 * Hyper-V host and subject to being changed at any time.
	 */
	WRITE_ONCE(desc->trans_id, __trans_id);
	if (trans_id)
		*trans_id = __trans_id;

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind) {
		if (rqst_id != VMBUS_NO_RQSTOR) {
			/* Reclaim request ID to avoid leak of IDs */
			if (channel->request_addr_callback != NULL)
				channel->request_addr_callback(channel, rqst_id);
		}
		return -ENODEV;
	}

	return 0;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is set even when there is no header; drivers
		 * are supposed to check buffer_actual_len.
		 */
		return 0;
	}

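	/*
	 * Illustrative note: for a typical inband packet the descriptor is
	 * 16 bytes, i.e. desc->offset8 == 2, so with raw == false only the
	 * payload after the header is copied out; with raw == true the
	 * caller also receives the vmpacket_descriptor itself.
	 */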
	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc;

	/*
	 * The Hyper-V host writes the packet data, then uses
	 * store_release() to update the write_index. Use load_acquire()
	 * here to prevent loads of the packet data from being re-ordered
	 * before the read of the write_index and potentially getting
	 * stale data.
	 */
	write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc, *desc_copy;
	u32 bytes_avail, pkt_len, pkt_offset;

	hv_debug_delay_test(channel, MESSAGE_DELAY);

	bytes_avail = hv_pkt_iter_avail(rbi);
	if (bytes_avail < sizeof(struct vmpacket_descriptor))
		return NULL;
	bytes_avail = min(rbi->pkt_buffer_size, bytes_avail);

	desc = (struct vmpacket_descriptor *)(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

	/*
	 * Ensure the compiler does not use references to incoming Hyper-V values (which
	 * could change at any moment) when reading local variables later in the code
	 */
	pkt_len = READ_ONCE(desc->len8) << 3;
	pkt_offset = READ_ONCE(desc->offset8) << 3;

	/*
	 * If pkt_len is invalid, set it to the smaller of hv_pkt_iter_avail() and
	 * rbi->pkt_buffer_size
	 */
	if (pkt_len < sizeof(struct vmpacket_descriptor) || pkt_len > bytes_avail)
		pkt_len = bytes_avail;

	/*
	 * If pkt_offset is invalid, arbitrarily set it to
	 * the size of vmpacket_descriptor
	 */
	if (pkt_offset < sizeof(struct vmpacket_descriptor) || pkt_offset > pkt_len)
		pkt_offset = sizeof(struct vmpacket_descriptor);

	/* Copy the Hyper-V packet out of the ring buffer */
	desc_copy = (struct vmpacket_descriptor *)rbi->pkt_buffer;
	memcpy(desc_copy, desc, pkt_len);

	/*
	 * Hyper-V could still change len8 and offset8 after the earlier read.
	 * Ensure that desc_copy has legal values for len8 and offset8 that
	 * are consistent with the copy we just made
	 */
	desc_copy->len8 = pkt_len >> 3;
	desc_copy->offset8 = pkt_offset >> 3;

	return desc_copy;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	hv_debug_delay_test(channel, MESSAGE_DELAY);
	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
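
/*
 * Illustrative usage (assuming the helpers declared in linux/hyperv.h):
 * a channel callback typically walks incoming packets with
 *
 *	struct vmpacket_descriptor *desc;
 *
 *	foreach_vmbus_pkt(desc, channel) {
 *		... process desc ...
 *	}
 *
 * which expands to hv_pkt_iter_first()/hv_pkt_iter_next(); when the
 * iterator runs out of packets, hv_pkt_iter_close() is called to update
 * read_index and signal the host if needed.
 */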

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
				  u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}

/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Older versions of Hyper-V (before WS2012 and Win8) do not
	 * implement pending_send_sz and simply poll if the host->guest
	 * ring buffer is full. No signaling is needed or expected.
	 */
	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If reading pending_send_sz were to be reordered and happen
	 * before we commit the new read_index, a race could occur. If the
	 * host were to set the pending_send_sz after we have sampled
	 * pending_send_sz, and the ring buffer blocks before we commit the
	 * read index, we could miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	/*
	 * If the pending_send_sz is zero, then the ring buffer is not
	 * blocked and there is no need to signal. This is by far the
	 * most common case, so exit quickly for best performance.
	 */
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * We want to signal the host only if we're transitioning
	 * from a "not enough free space" state to an "enough free
	 * space" state. For example, it's possible that this function
	 * could run and free up enough space to signal the host, and then
	 * run again and free up additional space before the host has a
	 * chance to clear the pending_send_sz. The 2nd invocation would
	 * be a null transition from "enough free space" to "enough free
	 * space", which doesn't warrant a signal.
	 *
	 * Exactly filling the ring buffer is treated as "not enough
	 * space". The ring buffer must always have at least one byte
	 * empty so the empty and full conditions are distinguishable.
	 * hv_get_bytes_to_write() doesn't fully tell the truth in
	 * this regard.
	 *
	 * So first check if we were in the "enough free space" state
	 * before we began the iteration. If so, the host was not
	 * blocked, and there's no need to signal.
	 */
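	/*
	 * Illustrative example (assumed values): with pending_sz = 1024
	 * and bytes_read = 512, a curr_write_sz of 1200 means the free
	 * space grew from 688 (<= 1024, host blocked) to 1200 (> 1024),
	 * so the host is signaled; had curr_write_sz been 2000, the host
	 * was never blocked (2000 - 512 = 1488 > 1024) and the checks
	 * below return without signaling.
	 */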
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/*
	 * Similarly, if the new state is "not enough space", then
	 * there's no need to signal.
	 */
	if (curr_write_sz <= pending_sz)
		return;

	++channel->intr_in_full;
	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);