/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
        rbi->ring_buffer->interrupt_mask = 1;
        mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
        u32 read;
        u32 write;

        rbi->ring_buffer->interrupt_mask = 0;
        mb();

        /*
         * Now check to see if the ring buffer is still empty.
         * If it is not, we raced and we need to process new
         * incoming messages.
         */
        hv_get_ringbuffer_availbytes(rbi, &read, &write);

        return read;
}
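
/*
 * Usage sketch (illustrative only; process_one_packet() is a
 * hypothetical helper, not part of this file). A reader masks host
 * interrupts while it drains the ring, then re-checks after unmasking
 * so a packet that arrived in the race window is not stranded:
 *
 *        hv_begin_read(rbi);
 *        for (;;) {
 *                while (process_one_packet(rbi))
 *                        ;
 *                if (hv_end_read(rbi) == 0)
 *                        break;               (no new data; safe to stop)
 *                hv_begin_read(rbi);          (we raced; drain again)
 *        }
 */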

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
        mb();
        if (rbi->ring_buffer->interrupt_mask)
                return false;

        /* check interrupt_mask before read_index */
        rmb();
        /*
         * This is the only case we need to signal: when the
         * ring transitions from being empty to non-empty.
         */
        if (old_write == rbi->ring_buffer->read_index)
                return true;

        return false;
}
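
/*
 * Worked example (hypothetical indices): suppose our write started at
 * old_write == 200.
 *
 *        read_index == 200: the ring was empty when we began writing
 *        and the host may already have exited its read loop, so signal.
 *        read_index == 120: the host is still draining and will pick up
 *        our data before re-checking, so no signal is needed.
 */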

/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, the consumer
 * of the ring buffer can potentially signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz, set to indicate if
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

static bool hv_need_to_signal_on_read(u32 old_rd,
                                      struct hv_ring_buffer_info *rbi)
{
        u32 prev_write_sz;
        u32 cur_write_sz;
        u32 r_size;
        u32 write_loc = rbi->ring_buffer->write_index;
        u32 read_loc = rbi->ring_buffer->read_index;
        u32 pending_sz = rbi->ring_buffer->pending_send_sz;

        /*
         * If the other end is not blocked on write, don't bother.
         */
        if (pending_sz == 0)
                return false;

        r_size = rbi->ring_datasize;
        cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
                        read_loc - write_loc;

        prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
                        old_rd - write_loc;

        if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
                return true;

        return false;
}
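
/*
 * Worked example (hypothetical numbers, ring_datasize == 4096): the
 * producer stalled with pending_send_sz == 512 while write_loc == 1000
 * and the old read index was 1008, i.e. prev_write_sz == 8 bytes of
 * room. After this read advances read_loc to 2000, cur_write_sz ==
 * 2000 - 1000 == 1000 >= 512: the ring just transitioned from "too
 * full to send" to "room available", so the consumer signals exactly
 * once.
 */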

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->read_index;

        return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over data it has already consumed,
 * such as the packet descriptor, before copying out the payload.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        next %= ring_info->ring_datasize;

        return next;
}
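
/*
 * Worked example (hypothetical numbers): with ring_datasize == 4096,
 * read_index == 4000 and offset == 200, the sum 4200 wraps to
 * 4200 % 4096 == 104, i.e. the read cursor lands 104 bytes past the
 * start of the data region.
 */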

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
}

/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
        return (void *)ring_info->ring_buffer->buffer;
}

/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the read and write indices as u64 of the specified ring buffer
 *
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}
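
/*
 * Layout note (illustrative): the returned u64 carries the write index
 * in its upper 32 bits, while the lower 32 bits (the read index slot)
 * are left zero. With write_index == 0x30, the trailer stored after
 * each packet is 0x0000003000000000.
 */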

/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy from the ring buffer into a destination buffer.
 * Assume there is enough room. Handles wrap-around in the src case only!!
 *
 */
static u32 hv_copyfrom_ringbuffer(
        struct hv_ring_buffer_info      *ring_info,
        void                            *dest,
        u32                             destlen,
        u32                             start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        u32 frag_len;

        /* wrap-around detected at the src */
        if (destlen > ring_buffer_size - start_read_offset) {
                frag_len = ring_buffer_size - start_read_offset;

                memcpy(dest, ring_buffer + start_read_offset, frag_len);
                memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
        } else {
                memcpy(dest, ring_buffer + start_read_offset, destlen);
        }

        start_read_offset += destlen;
        start_read_offset %= ring_buffer_size;

        return start_read_offset;
}
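
/*
 * Worked example (hypothetical numbers): with ring_buffer_size == 4096,
 * start_read_offset == 4080 and destlen == 100, the copy splits into
 * frag_len == 16 bytes from offset 4080 up to the end of the buffer
 * plus 84 bytes from offset 0, and the returned offset is
 * (4080 + 100) % 4096 == 84.
 */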

/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy from a source buffer into the ring buffer.
 * Assume there is enough room. Handles wrap-around in the dest case only!!
 *
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info      *ring_info,
        u32                             start_write_offset,
        void                            *src,
        u32                             srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected! */
        if (srclen > ring_buffer_size - start_write_offset) {
                frag_len = ring_buffer_size - start_write_offset;
                memcpy(ring_buffer + start_write_offset, src, frag_len);
                memcpy(ring_buffer, src + frag_len, srclen - frag_len);
        } else {
                memcpy(ring_buffer + start_write_offset, src, srclen);
        }

        start_write_offset += srclen;
        start_write_offset %= ring_buffer_size;

        return start_write_offset;
}

/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                             &bytes_avail_toread,
                                             &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}
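
/*
 * Usage sketch (illustrative only): a caller that wants a snapshot of
 * ring state, e.g. for a sysfs attribute, might do the following.
 * channel->outbound is the per-channel ring state kept by the vmbus
 * code; the pr_info() call is purely illustrative.
 *
 *        struct hv_ring_buffer_debug_info dbg;
 *
 *        hv_ringbuffer_get_debuginfo(&channel->outbound, &dbg);
 *        pr_info("w=%u r=%u avail=%u\n", dbg.current_write_index,
 *                dbg.current_read_index, dbg.bytes_avail_towrite);
 */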

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       void *buffer, u32 buflen)
{
        if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
                return -EINVAL;

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        ring_info->ring_size = buflen;
        ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}
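
/*
 * Usage sketch (illustrative only): callers hand this function a
 * page-aligned allocation whose first page becomes the
 * struct hv_ring_buffer header and whose remainder becomes the data
 * region. With a hypothetical 4-page buffer:
 *
 *        void *ring = (void *)__get_free_pages(GFP_KERNEL, 2);  (4 pages)
 *        struct hv_ring_buffer_info rbi;
 *
 *        if (ring && hv_ringbuffer_init(&rbi, ring, 4 * PAGE_SIZE) == 0)
 *                ... ring_datasize is now 3 * PAGE_SIZE ...
 */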

/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
                        struct scatterlist *sglist, u32 sgcount, bool *signal)
{
        int i = 0;
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 totalbytes_towrite = 0;

        struct scatterlist *sg;
        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
        unsigned long flags;

        for_each_sg(sglist, sg, sgcount, i) {
                totalbytes_towrite += sg->length;
        }

        totalbytes_towrite += sizeof(u64);

        spin_lock_irqsave(&outring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(outring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /*
         * If there is only room for the packet, assume it is full.
         * Otherwise, the next time around, we think the ring buffer
         * is empty since the read index == write index.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for_each_sg(sglist, sg, sgcount, i) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                           next_write_location,
                                                           sg_virt(sg),
                                                           sg->length);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                                   next_write_location,
                                                   &prev_indices,
                                                   sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        *signal = hv_need_to_signal(old_write, outring_info);
        return 0;
}
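
/*
 * Usage sketch (illustrative only): a sender builds a scatterlist over
 * its packet descriptor and payload, and signals the host only when
 * asked to. The two-entry scatterlist is hypothetical, vmbus_setevent()
 * stands in for whatever signaling helper the channel code uses, and
 * error handling is elided.
 *
 *        struct scatterlist sg[2];
 *        bool signal = false;
 *
 *        sg_init_table(sg, 2);
 *        sg_set_buf(&sg[0], &desc, sizeof(desc));
 *        sg_set_buf(&sg[1], payload, payload_len);
 *        if (hv_ringbuffer_write(&channel->outbound, sg, 2, &signal) == 0
 *            && signal)
 *                vmbus_setevent(channel);
 */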

/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
                       void *Buffer, u32 buflen)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        unsigned long flags;

        spin_lock_irqsave(&Inring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(Inring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&Inring_info->ring_lock, flags);

                return -EAGAIN;
        }

        /* Convert to byte offset */
        next_read_location = hv_get_next_read_location(Inring_info);

        next_read_location = hv_copyfrom_ringbuffer(Inring_info,
                                                    Buffer,
                                                    buflen,
                                                    next_read_location);

        spin_unlock_irqrestore(&Inring_info->ring_lock, flags);

        return 0;
}

/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
                       u32 buflen, u32 offset, bool *signal)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        unsigned long flags;
        u32 old_read;

        if (buflen <= 0)
                return -EINVAL;

        spin_lock_irqsave(&inring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(inring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        old_read = bytes_avail_toread;

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);

                return -EAGAIN;
        }

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    buflen,
                                                    next_read_location);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    &prev_indices,
                                                    sizeof(u64),
                                                    next_read_location);

        /*
         * Make sure all reads are done before we update the read index
         * since the writer may start writing to the read area once the
         * read index is updated.
         */
        mb();

        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);

        spin_unlock_irqrestore(&inring_info->ring_lock, flags);

        *signal = hv_need_to_signal_on_read(old_read, inring_info);

        return 0;
}
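
/*
 * Usage sketch (illustrative only): these two calls are naturally
 * paired; a consumer peeks just the fixed-size descriptor, sizes its
 * buffers from it, and then consumes the payload plus the u64 index
 * trailer in one read. struct vmpacket_descriptor with its 8-byte-unit
 * offset8/len8 fields is the vmbus packet header; vmbus_setevent() is
 * an assumed signaling helper and error handling is elided.
 *
 *        struct vmpacket_descriptor desc;
 *        u32 offset, userlen;
 *        bool signal = false;
 *
 *        hv_ringbuffer_peek(&channel->inbound, &desc, sizeof(desc));
 *        offset = desc.offset8 << 3;             (descriptor size)
 *        userlen = (desc.len8 << 3) - offset;    (payload size)
 *        hv_ringbuffer_read(&channel->inbound, buf, userlen, offset,
 *                           &signal);
 *        if (signal)
 *                vmbus_setevent(channel);        (wake a blocked sender)
 */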