/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <[email protected]>
 *   Hank Janssen <[email protected]>
 *   K. Y. Srinivasan <[email protected]>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if they receive interrupts when they are not
 * expecting them. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the
 * guest-to-host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();

	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->write_index;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->read_index;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over data it has already consumed
 * (e.g. the packet descriptor on a non-raw read).
 */
static inline u32
hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	if (next >= ring_info->ring_datasize)
		next -= ring_info->ring_datasize;

	return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the data portion of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the ring buffer indices packed as a u64, as stored in the packet
 * trailer: the write index goes in the upper 32 bits and the lower 32
 * bits are left zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from the ring buffer into a caller buffer.
 * Assumes there is enough room. A copy that crosses the end of the
 * ring data is handled by the wrap-around virtual mapping set up in
 * hv_ringbuffer_init(), so a single memcpy suffices; only the
 * returned offset must be wrapped explicitly.
 */
static u32 hv_copyfrom_ringbuffer(const struct hv_ring_buffer_info *ring_info,
				  void *dest,
				  u32 destlen,
				  u32 start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	if (start_read_offset >= ring_buffer_size)
		start_read_offset -= ring_buffer_size;

	return start_read_offset;
}

/*
 * Helper routine to copy from a caller buffer into the ring buffer.
 * Assumes there is enough room. As above, a copy that crosses the end
 * of the ring data lands in the wrap-around mapping, so only the
 * returned offset must be wrapped explicitly.
 */
static u32 hv_copyto_ringbuffer(struct hv_ring_buffer_info *ring_info,
				u32 start_write_offset,
				const void *src,
				u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * The first page holds struct hv_ring_buffer; map the remaining
	 * data pages twice in a row, so that a copy which runs past the
	 * end of the ring continues seamlessly at its start and the copy
	 * helpers never have to split a memcpy.
	 */
	pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
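
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * channel-open path is expected to allocate one physically contiguous
 * block covering both rings and initialize each half. The function
 * name, the send_pages/recv_pages split, and the error handling are
 * assumptions for the example.
 */
static int __maybe_unused example_alloc_rings(struct vmbus_channel *channel,
					      u32 send_pages, u32 recv_pages)
{
	unsigned int order = get_order((send_pages + recv_pages) << PAGE_SHIFT);
	struct page *page;
	int ret;

	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

	ret = hv_ringbuffer_init(&channel->outbound, page, send_pages);
	if (!ret)
		ret = hv_ringbuffer_init(&channel->inbound,
					 &page[send_pages], recv_pages);
	if (ret)
		__free_pages(page, order);
	return ret;
}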

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only just enough room for the packet, treat the
	 * ring as full: if a write were allowed to fill the ring
	 * completely, the read index would equal the write index and
	 * the ring would be indistinguishable from an empty one.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}
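
/*
 * Usage sketch (illustrative only, not part of the original file):
 * callers along the lines of vmbus_sendpacket() pass the packet as a
 * kvec array; hv_ringbuffer_write() itself appends the 8-byte index
 * trailer. The descriptor field values and the assumption that the
 * payload is already padded to a multiple of 8 bytes are example
 * choices, not requirements stated in this file.
 */
static int __maybe_unused example_send(struct vmbus_channel *channel,
				       void *payload, u32 payload_len)
{
	struct vmpacket_descriptor desc;
	struct kvec bufferlist[2];

	desc.type = VM_PKT_DATA_INBAND;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.offset8 = sizeof(desc) >> 3;		/* in 8-byte units */
	desc.len8 = (sizeof(desc) + payload_len) >> 3;	/* payload_len % 8 == 0 */
	desc.trans_id = 0;				/* example request id */

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = sizeof(desc);
	bufferlist[1].iov_base = payload;
	bufferlist[1].iov_len = payload_len;

	return hv_ringbuffer_write(channel, bufferlist, 2);
}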

static inline void
init_cached_read_index(struct hv_ring_buffer_info *rbi)
{
	rbi->cached_read_index = rbi->ring_buffer->read_index;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	u32 bytes_avail_toread;
	u32 next_read_location;
	u64 prev_indices = 0;
	struct vmpacket_descriptor desc;
	u32 offset;
	u32 packetlen;
	struct hv_ring_buffer_info *inring_info = &channel->inbound;

	if (buflen <= 0)
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
	/* Make sure there is something to read */
	if (bytes_avail_toread < sizeof(desc)) {
		/*
		 * No error is set when there is not even a header;
		 * drivers are supposed to analyze buffer_actual_len.
		 */
		return 0;
	}

	init_cached_read_index(inring_info);

	next_read_location = hv_get_next_read_location(inring_info);
	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
						    sizeof(desc),
						    next_read_location);

	offset = raw ? 0 : (desc.offset8 << 3);
	packetlen = (desc.len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc.trans_id;

	if (bytes_avail_toread < packetlen + offset)
		return -EAGAIN;

	if (packetlen > buflen)
		return -ENOBUFS;

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    packetlen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	hv_signal_on_read(channel);

	return 0;
}
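
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * vmbus_recvpacket()-style caller. The buffer size and the handling of
 * the two "no data copied" outcomes are assumptions for the example.
 */
static int __maybe_unused example_recv(struct vmbus_channel *channel)
{
	u8 buf[256];
	u32 len = 0;
	u64 req_id;
	int ret;

	ret = hv_ringbuffer_read(channel, buf, sizeof(buf), &len,
				 &req_id, false);
	if (ret == -ENOBUFS)
		return ret;	/* packet larger than buf; len holds its size */
	if (ret == 0 && len == 0)
		return 0;	/* ring empty: no packet, not an error */

	return ret;
}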

/*
 * Determine the number of bytes available in the ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but uses the private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get the first vmbus packet from the ring buffer after read_index.
 *
 * If the ring buffer is empty, returns NULL and no other action is needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/* set state for later hv_signal_on_read() */
	init_cached_read_index(rbi);

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get the next vmbus packet from the ring buffer.
 *
 * Advances the current location (priv_read_index), wrapping around the
 * end of the ring as needed, and checks for more data. Returns NULL
 * when no further complete packet is available.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;
	else
		return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/*
 * Update the ring buffer's read index after iterating over packets,
 * making the space consumed by the iterator visible to the host.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	hv_signal_on_read(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
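
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * channel callback can drain the inbound ring in place with the
 * iterator API above, then publish the read index once at the end.
 * The function name and the pr_debug() body are example placeholders.
 */
static void __maybe_unused example_drain(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *pkt;

	for (pkt = hv_pkt_iter_first(channel); pkt;
	     pkt = __hv_pkt_iter_next(channel, pkt)) {
		/* the payload begins offset8 * 8 bytes into the packet */
		void *data = (char *)pkt + (pkt->offset8 << 3);
		u32 datalen = (pkt->len8 - pkt->offset8) << 3;

		pr_debug("pkt type %u payload %p len %u\n",
			 pkt->type, data, datalen);
	}

	/* publish the new read_index and signal the host if needed */
	hv_pkt_iter_close(channel);
}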