/*
 * Options Visitor
 *
 * Copyright Red Hat, Inc. 2012-2016
 *
 * Author: Laszlo Ersek <[email protected]>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "qapi/qmp/qerror.h"
#include "qapi/opts-visitor.h"
#include "qemu/queue.h"
#include "qemu/option_int.h"
#include "qapi/visitor-impl.h"


enum ListMode
{
    LM_NONE,             /* not traversing a list of repeated options */

    LM_IN_PROGRESS,      /* opts_next_list() ready to be called.
                          *
                          * Generating the next list link will consume the most
                          * recently parsed QemuOpt instance of the repeated
                          * option.
                          *
                          * Parsing a value into the list link will examine the
                          * next QemuOpt instance of the repeated option, and
                          * possibly enter LM_SIGNED_INTERVAL or
                          * LM_UNSIGNED_INTERVAL.
                          */

    LM_SIGNED_INTERVAL,  /* opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently stored element from the signed interval,
                          * parsed from the most recent QemuOpt instance of the
                          * repeated option. This may consume QemuOpt itself
                          * and return to LM_IN_PROGRESS.
                          *
                          * Parsing a value into the list link will store the
                          * next element of the signed interval.
                          */

    LM_UNSIGNED_INTERVAL /* Same as above, only for an unsigned interval. */
};

typedef enum ListMode ListMode;
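
/*
 * Worked example of the state machine above (the repeated option name "cpus"
 * is hypothetical, and the member is assumed to be parsed as unsigned): for
 * "cpus=1-2,cpus=7", opts_start_list() enters LM_IN_PROGRESS; parsing the
 * first value sees "1-2", yields 1 and enters LM_UNSIGNED_INTERVAL; the next
 * two iterations yield 2 (interval exhausted, back to LM_IN_PROGRESS) and 7;
 * the opts_next_list() call after that terminates the list.
 */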

struct OptsVisitor
{
    Visitor visitor;

    /* Ownership remains with opts_visitor_new()'s caller. */
    const QemuOpts *opts_root;

    unsigned depth;

    /* Non-null iff depth is positive. Each key is a QemuOpt name. Each value
     * is a non-empty GQueue, enumerating all QemuOpt occurrences with that
     * name. */
    GHashTable *unprocessed_opts;

    /* The list currently being traversed with opts_start_list() /
     * opts_next_list(). The list must have a struct element type in the
     * schema, with a single mandatory scalar member. */
    ListMode list_mode;
    GQueue *repeated_opts;

    /* When parsing a list of repeating options as integers, values of the form
     * "a-b", representing a closed interval, are allowed. Elements in the
     * range are generated individually.
     */
    union {
        int64_t s;
        uint64_t u;
    } range_next, range_limit;

    /* If "opts_root->id" is set, reinstantiate it as a fake QemuOpt for
     * uniformity. Only its "name" and "str" fields are set. "fake_id_opt" does
     * not survive or escape the OptsVisitor object.
     */
    QemuOpt *fake_id_opt;
};


static OptsVisitor *to_ov(Visitor *v)
{
    return container_of(v, OptsVisitor, visitor);
}


static void
destroy_list(gpointer list)
{
    g_queue_free(list);
}


static void
opts_visitor_insert(GHashTable *unprocessed_opts, const QemuOpt *opt)
{
    GQueue *list;

    list = g_hash_table_lookup(unprocessed_opts, opt->name);
    if (list == NULL) {
        list = g_queue_new();

        /* GHashTable will never try to free the keys -- we supply NULL as
         * "key_destroy_func" in opts_start_struct(). Thus cast away key
         * const-ness in order to suppress gcc's warning.
         */
        g_hash_table_insert(unprocessed_opts, (gpointer)opt->name, list);
    }

    /* Similarly, destroy_list() doesn't call g_queue_free_full(). */
    g_queue_push_tail(list, (gpointer)opt);
}


static void
opts_start_struct(Visitor *v, const char *name, void **obj,
                  size_t size, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;

    if (obj) {
        *obj = g_malloc0(size);
    }
    if (ov->depth++ > 0) {
        return;
    }

    ov->unprocessed_opts = g_hash_table_new_full(&g_str_hash, &g_str_equal,
                                                 NULL, &destroy_list);
    QTAILQ_FOREACH(opt, &ov->opts_root->head, next) {
        /* ensured by qemu-option.c::opts_do_parse() */
        assert(strcmp(opt->name, "id") != 0);

        opts_visitor_insert(ov->unprocessed_opts, opt);
    }

    if (ov->opts_root->id != NULL) {
        ov->fake_id_opt = g_malloc0(sizeof *ov->fake_id_opt);

        ov->fake_id_opt->name = g_strdup("id");
        ov->fake_id_opt->str = g_strdup(ov->opts_root->id);
        opts_visitor_insert(ov->unprocessed_opts, ov->fake_id_opt);
    }
}


static void
opts_check_struct(Visitor *v, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    GHashTableIter iter;
    GQueue *any;

    if (ov->depth > 1) {
        return;
    }

    /* we should have processed all (distinct) QemuOpt instances */
    g_hash_table_iter_init(&iter, ov->unprocessed_opts);
    if (g_hash_table_iter_next(&iter, NULL, (void **)&any)) {
        const QemuOpt *first;

        first = g_queue_peek_head(any);
        error_setg(errp, QERR_INVALID_PARAMETER, first->name);
    }
}


static void
opts_end_struct(Visitor *v, void **obj)
{
    OptsVisitor *ov = to_ov(v);

    if (--ov->depth > 0) {
        return;
    }

    g_hash_table_destroy(ov->unprocessed_opts);
    ov->unprocessed_opts = NULL;
    if (ov->fake_id_opt) {
        g_free(ov->fake_id_opt->name);
        g_free(ov->fake_id_opt->str);
        g_free(ov->fake_id_opt);
    }
    ov->fake_id_opt = NULL;
}


static GQueue *
lookup_distinct(const OptsVisitor *ov, const char *name, Error **errp)
{
    GQueue *list;

    list = g_hash_table_lookup(ov->unprocessed_opts, name);
    if (!list) {
        error_setg(errp, QERR_MISSING_PARAMETER, name);
    }
    return list;
}


static void
opts_start_list(Visitor *v, const char *name, GenericList **list, size_t size,
                Error **errp)
{
    OptsVisitor *ov = to_ov(v);

    /* we can't traverse a list in a list */
    assert(ov->list_mode == LM_NONE);
    /* we don't support visits without a list */
    assert(list);
    ov->repeated_opts = lookup_distinct(ov, name, errp);
    if (ov->repeated_opts) {
        ov->list_mode = LM_IN_PROGRESS;
        *list = g_malloc0(size);
    } else {
        *list = NULL;
    }
}


static GenericList *
opts_next_list(Visitor *v, GenericList *tail, size_t size)
{
    OptsVisitor *ov = to_ov(v);

    switch (ov->list_mode) {
    case LM_SIGNED_INTERVAL:
    case LM_UNSIGNED_INTERVAL:
        if (ov->list_mode == LM_SIGNED_INTERVAL) {
            if (ov->range_next.s < ov->range_limit.s) {
                ++ov->range_next.s;
                break;
            }
        } else if (ov->range_next.u < ov->range_limit.u) {
            ++ov->range_next.u;
            break;
        }
        ov->list_mode = LM_IN_PROGRESS;
        /* range has been completed, fall through in order to pop option */

    case LM_IN_PROGRESS: {
        const QemuOpt *opt;

        opt = g_queue_pop_head(ov->repeated_opts);
        if (g_queue_is_empty(ov->repeated_opts)) {
            g_hash_table_remove(ov->unprocessed_opts, opt->name);
            return NULL;
        }
        break;
    }

    default:
        abort();
    }

    tail->next = g_malloc0(size);
    return tail->next;
}


static void
opts_check_list(Visitor *v, Error **errp)
{
    /*
     * Unvisited list elements will be reported later when checking
     * whether unvisited struct members remain.
     */
}


static void
opts_end_list(Visitor *v, void **obj)
{
    OptsVisitor *ov = to_ov(v);

    assert(ov->list_mode == LM_IN_PROGRESS ||
           ov->list_mode == LM_SIGNED_INTERVAL ||
           ov->list_mode == LM_UNSIGNED_INTERVAL);
    ov->repeated_opts = NULL;
    ov->list_mode = LM_NONE;
}


static const QemuOpt *
lookup_scalar(const OptsVisitor *ov, const char *name, Error **errp)
{
    if (ov->list_mode == LM_NONE) {
        GQueue *list;

        /* the last occurrence of any QemuOpt takes effect when queried by name
         */
        list = lookup_distinct(ov, name, errp);
        return list ? g_queue_peek_tail(list) : NULL;
    }
    assert(ov->list_mode == LM_IN_PROGRESS);
    return g_queue_peek_head(ov->repeated_opts);
}


static void
processed(OptsVisitor *ov, const char *name)
{
    if (ov->list_mode == LM_NONE) {
        g_hash_table_remove(ov->unprocessed_opts, name);
        return;
    }
    assert(ov->list_mode == LM_IN_PROGRESS);
    /* do nothing */
}


static void
opts_type_str(Visitor *v, const char *name, char **obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        *obj = NULL;
        return;
    }
    *obj = g_strdup(opt->str ? opt->str : "");
    /* Note that we consume a string even if this is called as part of
     * an enum visit that later fails because the string is not a
     * valid enum value; this is harmless because tracking what gets
     * consumed only matters to visit_end_struct() as the final error
     * check if there were no other failures during the visit. */
    processed(ov, name);
}


/* mimics qemu-option.c::parse_option_bool() */
static void
opts_type_bool(Visitor *v, const char *name, bool *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }

    if (opt->str) {
        if (strcmp(opt->str, "on") == 0 ||
            strcmp(opt->str, "yes") == 0 ||
            strcmp(opt->str, "y") == 0) {
            *obj = true;
        } else if (strcmp(opt->str, "off") == 0 ||
                   strcmp(opt->str, "no") == 0 ||
                   strcmp(opt->str, "n") == 0) {
            *obj = false;
        } else {
            error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
                       "on|yes|y|off|no|n");
            return;
        }
    } else {
        *obj = true;
    }

    processed(ov, name);
}


static void
opts_type_int64(Visitor *v, const char *name, int64_t *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;
    const char *str;
    long long val;
    char *endptr;

    if (ov->list_mode == LM_SIGNED_INTERVAL) {
        *obj = ov->range_next.s;
        return;
    }

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    str = opt->str ? opt->str : "";

    /* we've gotten past lookup_scalar() */
    assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);

    errno = 0;
    val = strtoll(str, &endptr, 0);
    if (errno == 0 && endptr > str && INT64_MIN <= val && val <= INT64_MAX) {
        if (*endptr == '\0') {
            *obj = val;
            processed(ov, name);
            return;
        }
        if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
            long long val2;

            str = endptr + 1;
            val2 = strtoll(str, &endptr, 0);
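            /* Accept the interval only if it holds at most
             * OPTS_VISITOR_RANGE_MAX elements. The first disjunct below
             * covers the case where "val + OPTS_VISITOR_RANGE_MAX" would
             * overflow; any val2 <= INT64_MAX is then necessarily close
             * enough to val.
             */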
            if (errno == 0 && endptr > str && *endptr == '\0' &&
                INT64_MIN <= val2 && val2 <= INT64_MAX && val <= val2 &&
                (val > INT64_MAX - OPTS_VISITOR_RANGE_MAX ||
                 val2 < val + OPTS_VISITOR_RANGE_MAX)) {
                ov->range_next.s = val;
                ov->range_limit.s = val2;
                ov->list_mode = LM_SIGNED_INTERVAL;

                /* as if entering on the top */
                *obj = ov->range_next.s;
                return;
            }
        }
    }
    error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
               (ov->list_mode == LM_NONE) ? "an int64 value" :
               "an int64 value or range");
}


static void
opts_type_uint64(Visitor *v, const char *name, uint64_t *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;
    const char *str;
    unsigned long long val;
    char *endptr;

    if (ov->list_mode == LM_UNSIGNED_INTERVAL) {
        *obj = ov->range_next.u;
        return;
    }

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    str = opt->str;

    /* we've gotten past lookup_scalar() */
    assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);

    if (parse_uint(str, &val, &endptr, 0) == 0 && val <= UINT64_MAX) {
        if (*endptr == '\0') {
            *obj = val;
            processed(ov, name);
            return;
        }
        if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
            unsigned long long val2;

            str = endptr + 1;
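            /* As in opts_type_int64(), accept the interval only if it holds
             * at most OPTS_VISITOR_RANGE_MAX elements; no overflow guard is
             * needed here because "val <= val2" is checked before the
             * subtraction.
             */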
            if (parse_uint_full(str, &val2, 0) == 0 &&
                val2 <= UINT64_MAX && val <= val2 &&
                val2 - val < OPTS_VISITOR_RANGE_MAX) {
                ov->range_next.u = val;
                ov->range_limit.u = val2;
                ov->list_mode = LM_UNSIGNED_INTERVAL;

                /* as if entering on the top */
                *obj = ov->range_next.u;
                return;
            }
        }
    }
    error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
               (ov->list_mode == LM_NONE) ? "a uint64 value" :
               "a uint64 value or range");
}


static void
opts_type_size(Visitor *v, const char *name, uint64_t *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;
    int err;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }

    err = qemu_strtosz(opt->str ? opt->str : "", NULL, obj);
    if (err < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
                   "a size value");
        return;
    }

    processed(ov, name);
}


static void
opts_optional(Visitor *v, const char *name, bool *present)
{
    OptsVisitor *ov = to_ov(v);

    /* we only support a single mandatory scalar field in a list node */
    assert(ov->list_mode == LM_NONE);
    *present = (lookup_distinct(ov, name, NULL) != NULL);
}


static void
opts_free(Visitor *v)
{
    OptsVisitor *ov = to_ov(v);

    if (ov->unprocessed_opts != NULL) {
        g_hash_table_destroy(ov->unprocessed_opts);
    }
    g_free(ov->fake_id_opt);
    g_free(ov);
}


Visitor *
opts_visitor_new(const QemuOpts *opts)
{
    OptsVisitor *ov;

    assert(opts);
    ov = g_malloc0(sizeof *ov);

    ov->visitor.type = VISITOR_INPUT;

    ov->visitor.start_struct = &opts_start_struct;
    ov->visitor.check_struct = &opts_check_struct;
    ov->visitor.end_struct = &opts_end_struct;

    ov->visitor.start_list = &opts_start_list;
    ov->visitor.next_list = &opts_next_list;
    ov->visitor.check_list = &opts_check_list;
    ov->visitor.end_list = &opts_end_list;

    ov->visitor.type_int64 = &opts_type_int64;
    ov->visitor.type_uint64 = &opts_type_uint64;
    ov->visitor.type_size = &opts_type_size;
    ov->visitor.type_bool = &opts_type_bool;
    ov->visitor.type_str = &opts_type_str;

    /* type_number() is not filled in, but this is not the first visitor to
     * skip some mandatory methods... */

    ov->visitor.optional = &opts_optional;
    ov->visitor.free = opts_free;

    ov->opts_root = opts;

    return &ov->visitor;
}
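
/*
 * Minimal usage sketch (not part of this file's API contract; the
 * QAPI-generated names below, NumaOptions and visit_type_NumaOptions(),
 * are assumed to come from the QAPI schema):
 *
 *     Visitor *v = opts_visitor_new(opts);
 *     NumaOptions *object = NULL;
 *     Error *err = NULL;
 *
 *     visit_type_NumaOptions(v, NULL, &object, &err);
 *     visit_free(v);
 *     if (err) {
 *         error_report_err(err);
 *     } else {
 *         // ... use *object, then release it ...
 *         qapi_free_NumaOptions(object);
 *     }
 */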