/*
 * Simple C functions to supplement the C library
 *
 * Copyright (c) 2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "host-utils.h"
#include <math.h>

#include "qemu_socket.h"

/* strcpy and truncate. */
void pstrcpy(char *buf, int buf_size, const char *str)
{
    int c;
    char *q = buf;

    if (buf_size <= 0)
        return;

    for(;;) {
        c = *str++;
        if (c == 0 || q >= buf + buf_size - 1)
            break;
        *q++ = c;
    }
    *q = '\0';
}

/* strcat and truncate. */
char *pstrcat(char *buf, int buf_size, const char *s)
{
    int len;
    len = strlen(buf);
    if (len < buf_size)
        pstrcpy(buf + len, buf_size - len, s);
    return buf;
}

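/*
 * For example, building a path into a fixed-size buffer (sketch; `dir'
 * and `name' are illustrative strings):
 *
 *   char path[PATH_MAX];
 *
 *   pstrcpy(path, sizeof(path), dir);
 *   pstrcat(path, sizeof(path), "/");
 *   pstrcat(path, sizeof(path), name);
 *
 * Unlike strncpy(), the result is always NUL-terminated; anything that
 * does not fit into buf_size - 1 bytes is silently dropped.
 */
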
/*
 * Return non-zero if str starts with the prefix val. On a match, if ptr
 * is non-NULL, *ptr is set to point just past the prefix in str.
 */
int strstart(const char *str, const char *val, const char **ptr)
{
    const char *p, *q;
    p = str;
    q = val;
    while (*q != '\0') {
        if (*p != *q)
            return 0;
        p++;
        q++;
    }
    if (ptr)
        *ptr = p;
    return 1;
}

/* Case-insensitive variant of strstart(). */
int stristart(const char *str, const char *val, const char **ptr)
{
    const char *p, *q;
    p = str;
    q = val;
    while (*q != '\0') {
        if (qemu_toupper(*p) != qemu_toupper(*q))
            return 0;
        p++;
        q++;
    }
    if (ptr)
        *ptr = p;
    return 1;
}

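/*
 * strstart() is handy for prefix-based option parsing, e.g. (sketch;
 * `arg' is an illustrative input string):
 *
 *   const char *value;
 *
 *   if (strstart(arg, "file=", &value)) {
 *       ... value now points just past "file=" ...
 *   }
 *
 * stristart() does the same comparison case-insensitively.
 */
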
/* XXX: use host strnlen if available ? */
int qemu_strnlen(const char *s, int max_len)
{
    int i;

    for(i = 0; i < max_len; i++) {
        if (s[i] == '\0') {
            break;
        }
    }
    return i;
}

/* Convert a broken-down UTC time to a time_t, like the non-portable timegm(). */
time_t mktimegm(struct tm *tm)
{
    time_t t;
    int y = tm->tm_year + 1900, m = tm->tm_mon + 1, d = tm->tm_mday;
    if (m < 3) {
        m += 12;
        y--;
    }
    t = 86400 * (d + (153 * m - 457) / 5 + 365 * y + y / 4 - y / 100 +
                 y / 400 - 719469);
    t += 3600 * tm->tm_hour + 60 * tm->tm_min + tm->tm_sec;
    return t;
}

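/*
 * Sanity check of the formula above: for 2000-01-01 00:00:00 UTC
 * (tm_year = 100, tm_mon = 0, tm_mday = 1) the day count works out to
 * 10957, so mktimegm() returns 10957 * 86400 = 946684800, the well-known
 * Unix timestamp for the start of year 2000.
 */
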
/*
 * Find last set: return the 1-based position of the most significant set
 * bit, or 0 if no bits are set.
 */
int qemu_fls(int i)
{
    return 32 - clz32(i);
}

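/*
 * For example, qemu_fls(1) == 1 and qemu_fls(0x10) == 5; because clz32(0)
 * is defined to return 32, qemu_fls(0) == 0.
 */
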
/*
 * Make sure data goes on disk, but if possible do not bother to
 * write out the inode just for timestamp updates.
 *
 * Unfortunately even in 2009 many operating systems do not support
 * fdatasync and have to fall back to fsync.
 */
int qemu_fdatasync(int fd)
{
#ifdef CONFIG_FDATASYNC
    return fdatasync(fd);
#else
    return fsync(fd);
#endif
}

/* io vectors */

void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint)
{
    qiov->iov = g_malloc(alloc_hint * sizeof(struct iovec));
    qiov->niov = 0;
    qiov->nalloc = alloc_hint;
    qiov->size = 0;
}

void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov)
{
    int i;

    qiov->iov = iov;
    qiov->niov = niov;
    qiov->nalloc = -1;
    qiov->size = 0;
    for (i = 0; i < niov; i++)
        qiov->size += iov[i].iov_len;
}

void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len)
{
    assert(qiov->nalloc != -1);

    if (qiov->niov == qiov->nalloc) {
        qiov->nalloc = 2 * qiov->nalloc + 1;
        qiov->iov = g_realloc(qiov->iov, qiov->nalloc * sizeof(struct iovec));
    }
    qiov->iov[qiov->niov].iov_base = base;
    qiov->iov[qiov->niov].iov_len = len;
    qiov->size += len;
    ++qiov->niov;
}

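/*
 * Typical lifecycle of an internally allocated vector (sketch; `hdr',
 * `hdr_len', `payload' and `payload_len' are illustrative):
 *
 *   QEMUIOVector qiov;
 *
 *   qemu_iovec_init(&qiov, 2);
 *   qemu_iovec_add(&qiov, hdr, hdr_len);
 *   qemu_iovec_add(&qiov, payload, payload_len);
 *   ...
 *   qemu_iovec_destroy(&qiov);
 *
 * Vectors wrapped with qemu_iovec_init_external() do not own their iovec
 * array and must not be passed to qemu_iovec_add() or qemu_iovec_destroy();
 * the nalloc != -1 assertions enforce this.
 */
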
/*
 * Copies iovecs from src to the end of dst. It starts copying after skipping
 * the given number of bytes in src and copies until src is completely copied
 * or the total size of the copied iovecs reaches size. The size of the last
 * copied iovec is changed in order to fit the specified total size if it isn't
 * a perfect fit already.
 */
void qemu_iovec_copy(QEMUIOVector *dst, QEMUIOVector *src, uint64_t skip,
                     size_t size)
{
    int i;
    size_t done;
    void *iov_base;
    uint64_t iov_len;

    assert(dst->nalloc != -1);

    done = 0;
    for (i = 0; (i < src->niov) && (done != size); i++) {
        if (skip >= src->iov[i].iov_len) {
            /* Skip the whole iov */
            skip -= src->iov[i].iov_len;
            continue;
        } else {
            /* Skip only part (or nothing) of the iov */
            iov_base = (uint8_t*) src->iov[i].iov_base + skip;
            iov_len = src->iov[i].iov_len - skip;
            skip = 0;
        }

        if (done + iov_len > size) {
            qemu_iovec_add(dst, iov_base, size - done);
            break;
        } else {
            qemu_iovec_add(dst, iov_base, iov_len);
        }
        done += iov_len;
    }
}

void qemu_iovec_concat(QEMUIOVector *dst, QEMUIOVector *src, size_t size)
{
    qemu_iovec_copy(dst, src, 0, size);
}

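/*
 * For example, splitting one request into two halves (sketch; `req' is an
 * illustrative source vector of at least `half' bytes):
 *
 *   QEMUIOVector first, second;
 *
 *   qemu_iovec_init(&first, req->niov);
 *   qemu_iovec_init(&second, req->niov);
 *   qemu_iovec_copy(&first, req, 0, half);
 *   qemu_iovec_copy(&second, req, half, req->size - half);
 *
 * The new vectors alias the memory described by req; only the iovec entries
 * are copied, not the data they point to.
 */
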
void qemu_iovec_destroy(QEMUIOVector *qiov)
{
    assert(qiov->nalloc != -1);

    qemu_iovec_reset(qiov);
    g_free(qiov->iov);
    qiov->nalloc = 0;
    qiov->iov = NULL;
}

void qemu_iovec_reset(QEMUIOVector *qiov)
{
    assert(qiov->nalloc != -1);

    qiov->niov = 0;
    qiov->size = 0;
}

void qemu_iovec_to_buffer(QEMUIOVector *qiov, void *buf)
{
    uint8_t *p = (uint8_t *)buf;
    int i;

    for (i = 0; i < qiov->niov; ++i) {
        memcpy(p, qiov->iov[i].iov_base, qiov->iov[i].iov_len);
        p += qiov->iov[i].iov_len;
    }
}

void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count)
{
    const uint8_t *p = (const uint8_t *)buf;
    size_t copy;
    int i;

    for (i = 0; i < qiov->niov && count; ++i) {
        copy = count;
        if (copy > qiov->iov[i].iov_len)
            copy = qiov->iov[i].iov_len;
        memcpy(qiov->iov[i].iov_base, p, copy);
        p += copy;
        count -= copy;
    }
}

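/*
 * qemu_iovec_to_buffer() and qemu_iovec_from_buffer() are the usual way to
 * bounce a scattered request through a linear buffer, e.g. (sketch):
 *
 *   void *bounce = g_malloc(qiov->size);
 *
 *   qemu_iovec_to_buffer(qiov, bounce);
 *   ... operate on the linear copy ...
 *   qemu_iovec_from_buffer(qiov, bounce, qiov->size);
 *   g_free(bounce);
 *
 * The destination of qemu_iovec_to_buffer() must be at least qiov->size
 * bytes long; qemu_iovec_from_buffer() stops after count bytes.
 */
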
void qemu_iovec_memset(QEMUIOVector *qiov, int c, size_t count)
{
    size_t n;
    int i;

    for (i = 0; i < qiov->niov && count; ++i) {
        n = MIN(count, qiov->iov[i].iov_len);
        memset(qiov->iov[i].iov_base, c, n);
        count -= n;
    }
}

void qemu_iovec_memset_skip(QEMUIOVector *qiov, int c, size_t count,
                            size_t skip)
{
    int i;
    size_t done;
    void *iov_base;
    uint64_t iov_len;

    done = 0;
    for (i = 0; (i < qiov->niov) && (done != count); i++) {
        if (skip >= qiov->iov[i].iov_len) {
            /* Skip the whole iov */
            skip -= qiov->iov[i].iov_len;
            continue;
        } else {
            /* Skip only part (or nothing) of the iov */
            iov_base = (uint8_t*) qiov->iov[i].iov_base + skip;
            iov_len = qiov->iov[i].iov_len - skip;
            skip = 0;
        }

        if (done + iov_len > count) {
            memset(iov_base, c, count - done);
            break;
        } else {
            memset(iov_base, c, iov_len);
        }
        done += iov_len;
    }
}

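/*
 * A common use is zero-filling the tail of a request after a short read,
 * e.g. (sketch; `bytes_read' is an illustrative count):
 *
 *   if (bytes_read < qiov->size) {
 *       qemu_iovec_memset_skip(qiov, 0, qiov->size - bytes_read, bytes_read);
 *   }
 */
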
/*
 * Checks if a buffer is all zeroes
 *
 * Attention! The len must be a multiple of 4 * sizeof(long) due to
 * restriction of optimizations in this function.
 */
bool buffer_is_zero(const void *buf, size_t len)
{
    /*
     * Use long as the biggest available internal data type that fits into the
     * CPU register and unroll the loop to smooth out the effect of memory
     * latency.
     */

    size_t i;
    long d0, d1, d2, d3;
    const long * const data = buf;

    assert(len % (4 * sizeof(long)) == 0);
    len /= sizeof(long);

    for (i = 0; i < len; i += 4) {
        d0 = data[i + 0];
        d1 = data[i + 1];
        d2 = data[i + 2];
        d3 = data[i + 3];

        if (d0 || d1 || d2 || d3) {
            return false;
        }
    }

    return true;
}

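/*
 * For example, sizeof(long) is 8 on a 64-bit host, so len must be a
 * multiple of 32 bytes there; checking a 4 KiB cluster is fine (sketch):
 *
 *   uint8_t cluster[4096] = { 0 };
 *
 *   if (buffer_is_zero(cluster, sizeof(cluster))) {
 *       ... the whole cluster reads as zeroes ...
 *   }
 */
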
#ifndef _WIN32
/* Set an additional file status flag (F_SETFL) on fd; returns 0 or -errno. */
int fcntl_setfl(int fd, int flag)
{
    int flags;

    flags = fcntl(fd, F_GETFL);
    if (flags == -1)
        return -errno;

    if (fcntl(fd, F_SETFL, flags | flag) == -1)
        return -errno;

    return 0;
}
#endif

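/*
 * For example, switching a descriptor to non-blocking mode:
 *
 *   if (fcntl_setfl(fd, O_NONBLOCK) < 0) {
 *       ... handle the error; the return value is -errno ...
 *   }
 */
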
static int64_t suffix_mul(char suffix, int64_t unit)
{
    switch (qemu_toupper(suffix)) {
    case STRTOSZ_DEFSUFFIX_B:
        return 1;
    case STRTOSZ_DEFSUFFIX_KB:
        return unit;
    case STRTOSZ_DEFSUFFIX_MB:
        return unit * unit;
    case STRTOSZ_DEFSUFFIX_GB:
        return unit * unit * unit;
    case STRTOSZ_DEFSUFFIX_TB:
        return unit * unit * unit * unit;
    }
    return -1;
}

/*
 * Convert string to bytes, allowing either B/b for bytes, K/k for KB,
 * M/m for MB, G/g for GB or T/t for TB. End pointer will be returned
 * in *end, if not NULL. Return -1 on error.
 */
int64_t strtosz_suffix_unit(const char *nptr, char **end,
                            const char default_suffix, int64_t unit)
{
    int64_t retval = -1;
    char *endptr;
    unsigned char c;
    int mul_required = 0;
    double val, mul, integral, fraction;

    errno = 0;
    val = strtod(nptr, &endptr);
    if (isnan(val) || endptr == nptr || errno != 0) {
        goto fail;
    }
    fraction = modf(val, &integral);
    if (fraction != 0) {
        mul_required = 1;
    }
    c = *endptr;
    mul = suffix_mul(c, unit);
    if (mul >= 0) {
        endptr++;
    } else {
        mul = suffix_mul(default_suffix, unit);
        assert(mul >= 0);
    }
    if (mul == 1 && mul_required) {
        goto fail;
    }
    if ((val * mul >= INT64_MAX) || val < 0) {
        goto fail;
    }
    retval = val * mul;

fail:
    if (end) {
        *end = endptr;
    }

    return retval;
}

int64_t strtosz_suffix(const char *nptr, char **end, const char default_suffix)
{
    return strtosz_suffix_unit(nptr, end, default_suffix, 1024);
}

int64_t strtosz(const char *nptr, char **end)
{
    return strtosz_suffix(nptr, end, STRTOSZ_DEFSUFFIX_MB);
}

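/*
 * With the defaults above (1024-based units, megabytes as the default
 * suffix), strtosz() behaves as follows:
 *
 *   strtosz("4k", NULL)    ->  4096
 *   strtosz("1.5G", NULL)  ->  1610612736
 *   strtosz("128", NULL)   ->  134217728   (bare numbers are megabytes)
 *   strtosz("1.5b", NULL)  ->  -1          (fractional bytes are rejected)
 */
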
int qemu_parse_fd(const char *param)
{
    int fd;
    char *endptr = NULL;

    fd = strtol(param, &endptr, 10);
    if (*endptr || (fd == 0 && param == endptr)) {
        return -1;
    }
    return fd;
}

/*
 * Send/recv data with iovec buffers
 *
 * This function sends/receives data from/to the iovec buffer directly.
 * The first `offset' bytes in the iovec buffer are skipped and the next
 * `len' bytes are used.
 *
 * For example,
 *
 *   do_sendv_recvv(sockfd, iov, len, offset, 1);
 *
 * is equivalent to
 *
 *   char *buf = malloc(len);
 *   iov_to_buf(iov, iovcnt, buf, offset, len);
 *   send(sockfd, buf, len, 0);
 *   free(buf);
 */
static int do_sendv_recvv(int sockfd, struct iovec *iov, int len, int offset,
                          int do_sendv)
{
    int ret, diff, iovlen;
    struct iovec *last_iov;

    /* last_iov is inclusive, so count from one. */
    iovlen = 1;
    last_iov = iov;
    len += offset;

    while (last_iov->iov_len < len) {
        len -= last_iov->iov_len;

        last_iov++;
        iovlen++;
    }

    diff = last_iov->iov_len - len;
    last_iov->iov_len -= diff;

    while (iov->iov_len <= offset) {
        offset -= iov->iov_len;

        iov++;
        iovlen--;
    }

    iov->iov_base = (char *) iov->iov_base + offset;
    iov->iov_len -= offset;

    {
#if defined CONFIG_IOVEC && defined CONFIG_POSIX
        struct msghdr msg;
        memset(&msg, 0, sizeof(msg));
        msg.msg_iov = iov;
        msg.msg_iovlen = iovlen;

        do {
            if (do_sendv) {
                ret = sendmsg(sockfd, &msg, 0);
            } else {
                ret = recvmsg(sockfd, &msg, 0);
            }
        } while (ret == -1 && errno == EINTR);
#else
        struct iovec *p = iov;
        ret = 0;
        while (iovlen > 0) {
            int rc;
            if (do_sendv) {
                rc = send(sockfd, p->iov_base, p->iov_len, 0);
            } else {
                rc = qemu_recv(sockfd, p->iov_base, p->iov_len, 0);
            }
            if (rc == -1) {
                if (errno == EINTR) {
                    continue;
                }
                if (ret == 0) {
                    ret = -1;
                }
                break;
            }
            if (rc == 0) {
                break;
            }
            ret += rc;
            iovlen--, p++;
        }
#endif
    }

    /* Undo the changes above */
    iov->iov_base = (char *) iov->iov_base - offset;
    iov->iov_len += offset;
    last_iov->iov_len += diff;
    return ret;
}

int qemu_recvv(int sockfd, struct iovec *iov, int len, int iov_offset)
{
    return do_sendv_recvv(sockfd, iov, len, iov_offset, 0);
}

int qemu_sendv(int sockfd, struct iovec *iov, int len, int iov_offset)
{
    return do_sendv_recvv(sockfd, iov, len, iov_offset, 1);
}
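
/*
 * For example, transmitting only the tail of a scatter/gather list
 * (sketch; `total' is the illustrative byte length of the whole vector):
 *
 *   size_t half = total / 2;
 *   int ret = qemu_sendv(sockfd, iov, total - half, half);
 *
 * sends the last total - half bytes, skipping the first half bytes,
 * without the caller having to split any iovec entries by hand.
 */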