/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <[email protected]>
 *   Hank Janssen  <[email protected]>
 *   K. Y. Srinivasan <[email protected]>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/* Get the read and write indices as u64 of the specified ring buffer. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
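
/*
 * The value built above is written as the 8-byte trailer after every
 * packet (see hv_ringbuffer_write()); VMBUS_PKT_TRAILER accounts for
 * that trailer when the read iterator skips to the next descriptor.
 */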

/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

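	/*
	 * The data area is mapped twice back-to-back by hv_ringbuffer_init(),
	 * so a single memcpy may safely run past ring_buffer_size; only the
	 * returned offset needs to be wrapped.
	 */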
	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}

/*
 *
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer.
 */
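/*
 * Worked example (illustrative numbers only): with ring_datasize = 4096,
 * read_index = 100 and write_index = 4000, the writer still has
 * 4096 - (4000 - 100) = 196 bytes of room, and 4096 - 196 = 3900 bytes
 * are available to read.
 */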
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they can change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize the ring buffer. */
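/*
 * Note: callers (e.g. vmbus_open()) pass the first struct page of a
 * physically contiguous allocation plus its page count; a channel's
 * outbound and inbound rings are initialized separately.
 */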
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
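	/*
	 * For example, with page_cnt = 4 the mapping order built below is
	 * [hdr, d1, d2, d3, d1, d2, d3]: the 3-page data area appears twice
	 * in a row in the virtual mapping, so reads and writes can run past
	 * the end of the first copy without explicit wrap handling.
	 */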
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around we would think the ring buffer
	 * is empty since read_index == write_index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     kv_list[i].iov_base,
						     kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}

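/*
 * Illustrative sketch (not part of this file): how a sender along the lines
 * of vmbus_sendpacket() can feed hv_ringbuffer_write() a kvec list made up
 * of the packet descriptor, the payload, and 8-byte alignment padding.
 * The function name and the use of the completion flag are examples only.
 */
static int example_send_inband(struct vmbus_channel *channel,
			       void *payload, u32 payload_len, u64 requestid)
{
	struct vmpacket_descriptor desc;
	u64 aligned_data = 0;
	struct kvec bufferlist[3];
	u32 packetlen = sizeof(desc) + payload_len;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	desc.type = VM_PKT_DATA_INBAND;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.offset8 = sizeof(desc) >> 3;	/* payload offset, in 8-byte units */
	desc.len8 = packetlen_aligned >> 3;	/* total length, in 8-byte units */
	desc.trans_id = requestid;

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = sizeof(desc);
	bufferlist[1].iov_base = payload;
	bufferlist[1].iov_len = payload_len;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = packetlen_aligned - packetlen;

	return hv_ringbuffer_write(channel, bufferlist, 3);
}
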
int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is set even when there is no header; drivers are
		 * supposed to check buffer_actual_len.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc;

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
	if (desc)
		prefetch((char *)desc + (desc->len8 << 3));

	return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
				  u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}

/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Older versions of Hyper-V (before WS2012 and Win8) do not
	 * implement pending_send_sz and simply poll if the host->guest
	 * ring buffer is full. No signaling is needed or expected.
	 */
	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If reading pending_send_sz were to be reordered and happen
	 * before we commit the new read_index, a race could occur. If the
	 * host were to set the pending_send_sz after we have sampled
	 * pending_send_sz, and the ring buffer blocks before we commit the
	 * read index, we could miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	/*
	 * If the pending_send_sz is zero, then the ring buffer is not
	 * blocked and there is no need to signal. This is by far the
	 * most common case, so exit quickly for best performance.
	 */
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * We want to signal the host only if we're transitioning
	 * from a "not enough free space" state to an "enough free
	 * space" state. For example, it's possible that this function
	 * could run and free up enough space to signal the host, and then
	 * run again and free up additional space before the host has a
	 * chance to clear the pending_send_sz. The 2nd invocation would
	 * be a null transition from "enough free space" to "enough free
	 * space", which doesn't warrant a signal.
	 *
	 * Exactly filling the ring buffer is treated as "not enough
	 * space". The ring buffer always must have at least one byte
	 * empty so the empty and full conditions are distinguishable.
	 * hv_get_bytes_to_write() doesn't fully tell the truth in
	 * this regard.
	 *
	 * So first check if we were in the "enough free space" state
	 * before we began the iteration. If so, the host was not
	 * blocked, and there's no need to signal.
	 */
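	/*
	 * Worked example (illustrative numbers only): with pending_sz = 2048
	 * and curr_write_sz = 3000, having read bytes_read = 1500 this cycle
	 * means the writer previously saw only 3000 - 1500 = 1500 free bytes
	 * (blocked) and now sees 3000 (unblocked), so the host is signaled.
	 * If instead bytes_read were 100, the writer already saw 2900 free
	 * bytes before this cycle and no signal is sent.
	 */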
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/*
	 * Similarly, if the new state is "not enough space", then
	 * there's no need to signal.
	 */
	if (curr_write_sz <= pending_sz)
		return;

	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
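
/*
 * Illustrative sketch (not part of this file): a consumer can drain the
 * inbound ring with the iterator API roughly as below, committing the
 * read index and signaling the host (if needed) with a single
 * hv_pkt_iter_close() once the loop is done. The function name is an
 * example only; in-tree drivers generally go through vmbus_recvpacket()
 * or the iterator helpers declared in linux/hyperv.h.
 */
static void example_drain_inbound(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *desc;

	for (desc = hv_pkt_iter_first(channel);
	     desc != NULL;
	     desc = __hv_pkt_iter_next(channel, desc)) {
		/* payload starts desc->offset8 * 8 bytes into the packet */
		void *data = (char *)desc + (desc->offset8 << 3);
		u32 datalen = (desc->len8 - desc->offset8) << 3;

		/* process data/datalen for this packet here */
		pr_debug("packet %p len %u\n", data, datalen);
	}

	/* publish the new read_index and signal the host if it was blocked */
	hv_pkt_iter_close(channel);
}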