   Copyright (C) 2019-2021 Free Software Foundation, Inc.

   This file is part of libctf.

   libctf is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not see
   <http://www.gnu.org/licenses/>.  */

#include <sys/param.h>

#define EOVERFLOW ERANGE

#define roundup(x, y)  ((((x) + ((y) - 1)) / (y)) * (y))
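/* Illustrative note (not part of the original source): roundup() rounds its
   first argument up to the next multiple of its second, so roundup (5, 8) is
   8, roundup (8, 8) is 8, and roundup (37, 8) is 40.  It is used below to
   round bit offsets up to byte and alignment boundaries when laying out
   structure members.  */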
/* Make sure the ptrtab has enough space for at least one more type.
   We start with 4KiB of ptrtab, enough for a thousand types, then grow it 25%
   ...  */

ctf_grow_ptrtab (ctf_dict_t *fp)
  size_t new_ptrtab_len = fp->ctf_ptrtab_len;

  /* We allocate one more ptrtab entry than we need, for the initial zero,
     plus one because the caller will probably allocate a new type.  */

  if (fp->ctf_ptrtab == NULL)
    new_ptrtab_len = 1024;
  else if ((fp->ctf_typemax + 2) > fp->ctf_ptrtab_len)
    new_ptrtab_len = fp->ctf_ptrtab_len * 1.25;

  if (new_ptrtab_len != fp->ctf_ptrtab_len)
      if ((new_ptrtab = realloc (fp->ctf_ptrtab,
                                 new_ptrtab_len * sizeof (uint32_t))) == NULL)
        return (ctf_set_errno (fp, ENOMEM));

      fp->ctf_ptrtab = new_ptrtab;
      memset (fp->ctf_ptrtab + fp->ctf_ptrtab_len, 0,
              (new_ptrtab_len - fp->ctf_ptrtab_len) * sizeof (uint32_t));
      fp->ctf_ptrtab_len = new_ptrtab_len;

/* To create an empty CTF dict, we just declare a zeroed header and call
   ctf_bufopen() on it.  If ctf_bufopen succeeds, we mark the new dict r/w and
   initialize the dynamic members.  We start assigning type IDs at 1 because
   type ID 0 is used as a sentinel and a not-found indicator.  */

ctf_create (int *errp)
  static const ctf_header_t hdr = { .cth_preamble = { CTF_MAGIC, CTF_VERSION, 0 } };
  ctf_dynhash_t *dthash;
  ctf_dynhash_t *dvhash;
  ctf_dynhash_t *structs = NULL, *unions = NULL, *enums = NULL, *names = NULL;
  ctf_dynhash_t *objthash = NULL, *funchash = NULL;

  dthash = ctf_dynhash_create (ctf_hash_integer, ctf_hash_eq_integer,
      ctf_set_open_errno (errp, EAGAIN);

  dvhash = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
      ctf_set_open_errno (errp, EAGAIN);

  structs = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
  unions = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
  enums = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
  names = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
  objthash = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
  funchash = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,

  if (!structs || !unions || !enums || !names)
      ctf_set_open_errno (errp, EAGAIN);

  cts.cts_name = _CTF_SECTION;
  cts.cts_size = sizeof (hdr);

  if ((fp = ctf_bufopen_internal (&cts, NULL, NULL, NULL, 1, errp)) == NULL)

  fp->ctf_structs.ctn_writable = structs;
  fp->ctf_unions.ctn_writable = unions;
  fp->ctf_enums.ctn_writable = enums;
  fp->ctf_names.ctn_writable = names;
  fp->ctf_objthash = objthash;
  fp->ctf_funchash = funchash;
  fp->ctf_dthash = dthash;
  fp->ctf_dvhash = dvhash;
  fp->ctf_snapshots = 1;
  fp->ctf_snapshot_lu = 0;
  fp->ctf_flags |= LCTF_DIRTY;

  ctf_set_ctl_hashes (fp);
  ctf_setmodel (fp, CTF_MODEL_NATIVE);
  if (ctf_grow_ptrtab (fp) < 0)
      ctf_set_open_errno (errp, ctf_errno (fp));

  ctf_dynhash_destroy (structs);
  ctf_dynhash_destroy (unions);
  ctf_dynhash_destroy (enums);
  ctf_dynhash_destroy (names);
  ctf_dynhash_destroy (objthash);
  ctf_dynhash_destroy (funchash);
  ctf_dynhash_destroy (dvhash);
  ctf_dynhash_destroy (dthash);
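/* Illustrative usage sketch (not part of the original source): a caller
   creates a writable dict, populates it with the ctf_add_* functions below,
   then closes it or hands it to the linking/serialization machinery.

     int err;
     ctf_dict_t *fp = ctf_create (&err);

     if (fp == NULL)
       fprintf (stderr, "ctf_create: %s\n", ctf_errmsg (err));
     ...
     ctf_dict_close (fp);  */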
/* Compatibility: just update the threshold for ctf_discard.  */

ctf_update (ctf_dict_t *fp)
  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

  fp->ctf_dtoldid = fp->ctf_typemax;

ctf_name_table (ctf_dict_t *fp, int kind)
      return &fp->ctf_structs;
      return &fp->ctf_unions;
      return &fp->ctf_enums;
      return &fp->ctf_names;

ctf_dtd_insert (ctf_dict_t *fp, ctf_dtdef_t *dtd, int flag, int kind)
  if (ctf_dynhash_insert (fp->ctf_dthash, (void *) (uintptr_t) dtd->dtd_type,
      ctf_set_errno (fp, ENOMEM);

  if (flag == CTF_ADD_ROOT && dtd->dtd_data.ctt_name
      && (name = ctf_strraw (fp, dtd->dtd_data.ctt_name)) != NULL)
      if (ctf_dynhash_insert (ctf_name_table (fp, kind)->ctn_writable,
                              (char *) name, (void *) (uintptr_t)
          ctf_dynhash_remove (fp->ctf_dthash, (void *) (uintptr_t)
          ctf_set_errno (fp, ENOMEM);

  ctf_list_append (&fp->ctf_dtdefs, dtd);

ctf_dtd_delete (ctf_dict_t *fp, ctf_dtdef_t *dtd)
  ctf_dmdef_t *dmd, *nmd;
  int kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
  int name_kind = kind;

  ctf_dynhash_remove (fp->ctf_dthash, (void *) (uintptr_t) dtd->dtd_type);

      for (dmd = ctf_list_next (&dtd->dtd_u.dtu_members);
           dmd != NULL; dmd = nmd)
          if (dmd->dmd_name != NULL)
            free (dmd->dmd_name);
          nmd = ctf_list_next (dmd);
      free (dtd->dtd_u.dtu_argv);
      name_kind = dtd->dtd_data.ctt_type;

  if (dtd->dtd_data.ctt_name
      && (name = ctf_strraw (fp, dtd->dtd_data.ctt_name)) != NULL
      && LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info))
      ctf_dynhash_remove (ctf_name_table (fp, name_kind)->ctn_writable,
      ctf_str_remove_ref (fp, name, &dtd->dtd_data.ctt_name);

  ctf_list_delete (&fp->ctf_dtdefs, dtd);

ctf_dtd_lookup (const ctf_dict_t *fp, ctf_id_t type)
  return (ctf_dtdef_t *)
    ctf_dynhash_lookup (fp->ctf_dthash, (void *) (uintptr_t) type);

ctf_dynamic_type (const ctf_dict_t *fp, ctf_id_t id)
  if (!(fp->ctf_flags & LCTF_RDWR))

  if ((fp->ctf_flags & LCTF_CHILD) && LCTF_TYPE_ISPARENT (fp, id))

  idx = LCTF_TYPE_TO_INDEX(fp, id);

  if ((unsigned long) idx <= fp->ctf_typemax)
    return ctf_dtd_lookup (fp, id);

ctf_dvd_insert (ctf_dict_t *fp, ctf_dvdef_t *dvd)
  if (ctf_dynhash_insert (fp->ctf_dvhash, dvd->dvd_name, dvd) < 0)
      ctf_set_errno (fp, ENOMEM);
  ctf_list_append (&fp->ctf_dvdefs, dvd);

ctf_dvd_delete (ctf_dict_t *fp, ctf_dvdef_t *dvd)
  ctf_dynhash_remove (fp->ctf_dvhash, dvd->dvd_name);
  free (dvd->dvd_name);
  ctf_list_delete (&fp->ctf_dvdefs, dvd);

ctf_dvd_lookup (const ctf_dict_t *fp, const char *name)
  return (ctf_dvdef_t *) ctf_dynhash_lookup (fp->ctf_dvhash, name);

/* Discard all of the dynamic type definitions and variable definitions that
   have been added to the dict since the last call to ctf_update().  We locate
   such types by scanning the dtd list and deleting elements that have type IDs
   greater than ctf_dtoldid, which is set by ctf_update(), above, and by
   scanning the variable list and deleting elements that have update IDs equal
   to the current value of the last-update snapshot count (indicating that they
   were added after the most recent call to ctf_update()).  */

ctf_discard (ctf_dict_t *fp)
  ctf_snapshot_id_t last_update =
      fp->ctf_snapshot_lu + 1 };

  /* Update required?  */
  if (!(fp->ctf_flags & LCTF_DIRTY))

  return (ctf_rollback (fp, last_update));

ctf_snapshot (ctf_dict_t *fp)
  ctf_snapshot_id_t snapid;
  snapid.dtd_id = fp->ctf_typemax;
  snapid.snapshot_id = fp->ctf_snapshots++;

/* Like ctf_discard(), only discards everything after a particular ID.  */

ctf_rollback (ctf_dict_t *fp, ctf_snapshot_id_t id)
  ctf_dtdef_t *dtd, *ntd;
  ctf_dvdef_t *dvd, *nvd;

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

  if (fp->ctf_snapshot_lu >= id.snapshot_id)
    return (ctf_set_errno (fp, ECTF_OVERROLLBACK));

  for (dtd = ctf_list_next (&fp->ctf_dtdefs); dtd != NULL; dtd = ntd)
      ntd = ctf_list_next (dtd);

      if (LCTF_TYPE_TO_INDEX (fp, dtd->dtd_type) <= id.dtd_id)

      kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
      if (kind == CTF_K_FORWARD)
        kind = dtd->dtd_data.ctt_type;

      if (dtd->dtd_data.ctt_name
          && (name = ctf_strraw (fp, dtd->dtd_data.ctt_name)) != NULL
          && LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info))
          ctf_dynhash_remove (ctf_name_table (fp, kind)->ctn_writable,
          ctf_str_remove_ref (fp, name, &dtd->dtd_data.ctt_name);

      ctf_dynhash_remove (fp->ctf_dthash, (void *) (uintptr_t) dtd->dtd_type);
      ctf_dtd_delete (fp, dtd);

  for (dvd = ctf_list_next (&fp->ctf_dvdefs); dvd != NULL; dvd = nvd)
      nvd = ctf_list_next (dvd);

      if (dvd->dvd_snapshots <= id.snapshot_id)

      ctf_dvd_delete (fp, dvd);

  fp->ctf_typemax = id.dtd_id;
  fp->ctf_snapshots = id.snapshot_id;

  if (fp->ctf_snapshots == fp->ctf_snapshot_lu)
    fp->ctf_flags &= ~LCTF_DIRTY;
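/* Illustrative sketch (not from the original source): snapshot/rollback lets
   a caller tentatively add types and then back them out again; ctf_discard()
   is the same operation relative to the last ctf_update().  The encoding and
   type name below are placeholders.

     ctf_snapshot_id_t snap = ctf_snapshot (fp);
     ctf_encoding_t enc = { CTF_INT_SIGNED, 0, 32 };

     if (ctf_add_integer (fp, CTF_ADD_ROOT, "int", &enc) == CTF_ERR)
       ctf_rollback (fp, snap);  */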
ctf_add_generic (ctf_dict_t *fp, uint32_t flag, const char *name, int kind,

  if (flag != CTF_ADD_NONROOT && flag != CTF_ADD_ROOT)
    return (ctf_set_errno (fp, EINVAL));

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

  if (LCTF_INDEX_TO_TYPE (fp, fp->ctf_typemax, 1) >= CTF_MAX_TYPE)
    return (ctf_set_errno (fp, ECTF_FULL));

  if (LCTF_INDEX_TO_TYPE (fp, fp->ctf_typemax, 1) == (CTF_MAX_PTYPE - 1))
    return (ctf_set_errno (fp, ECTF_FULL));

  /* Make sure ptrtab always grows to be big enough for all types.  */
  if (ctf_grow_ptrtab (fp) < 0)
    return CTF_ERR; /* errno is set for us. */

  if ((dtd = malloc (sizeof (ctf_dtdef_t))) == NULL)
    return (ctf_set_errno (fp, EAGAIN));

  type = ++fp->ctf_typemax;
  type = LCTF_INDEX_TO_TYPE (fp, type, (fp->ctf_flags & LCTF_CHILD));

  memset (dtd, 0, sizeof (ctf_dtdef_t));
  dtd->dtd_data.ctt_name = ctf_str_add_ref (fp, name, &dtd->dtd_data.ctt_name);
  dtd->dtd_type = type;

  if (dtd->dtd_data.ctt_name == 0 && name != NULL && name[0] != '\0')
      return (ctf_set_errno (fp, EAGAIN));

  if (ctf_dtd_insert (fp, dtd, flag, kind) < 0)
      return CTF_ERR; /* errno is set for us. */

  fp->ctf_flags |= LCTF_DIRTY;

/* When encoding integer sizes, we want to convert a byte count in the range
   1-8 to the closest power of 2 (e.g. 3->4, 5->8, etc).  The clp2() function
   is a clever implementation from "Hacker's Delight" by Henry Warren, Jr.  */
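/* The helper itself is elided from this excerpt; a minimal sketch of the
   standard round-up-to-a-power-of-two bit trick the comment describes
   (e.g. clp2 (3) == 4, clp2 (5) == 8) might look like the following.  */

static size_t
clp2 (size_t x)
{
  x--;

  /* Smear the highest set bit into every lower bit position...  */
  x |= (x >> 1);
  x |= (x >> 2);
  x |= (x >> 4);
  x |= (x >> 8);
  x |= (x >> 16);

  /* ...so that adding one yields the next power of two.  */
  return (x + 1);
}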
ctf_add_encoded (ctf_dict_t *fp, uint32_t flag,
                 const char *name, const ctf_encoding_t *ep, uint32_t kind)

    return (ctf_set_errno (fp, EINVAL));

  if (name == NULL || name[0] == '\0')
    return (ctf_set_errno (fp, ECTF_NONAME));

  if ((type = ctf_add_generic (fp, flag, name, kind, &dtd)) == CTF_ERR)
    return CTF_ERR; /* errno is set for us. */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, flag, 0);
  dtd->dtd_data.ctt_size = clp2 (P2ROUNDUP (ep->cte_bits, CHAR_BIT)
  dtd->dtd_u.dtu_enc = *ep;

ctf_add_reftype (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref, uint32_t kind)
  ctf_dict_t *tmp = fp;
  int child = fp->ctf_flags & LCTF_CHILD;

  if (ref == CTF_ERR || ref > CTF_MAX_TYPE)
    return (ctf_set_errno (fp, EINVAL));

  if (ref != 0 && ctf_lookup_by_id (&tmp, ref) == NULL)
    return CTF_ERR; /* errno is set for us. */

  if ((type = ctf_add_generic (fp, flag, NULL, kind, &dtd)) == CTF_ERR)
    return CTF_ERR; /* errno is set for us. */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, flag, 0);
  dtd->dtd_data.ctt_type = (uint32_t) ref;

  if (kind != CTF_K_POINTER)

  /* If we are adding a pointer, update the ptrtab, pointing at this type from
     the type it points to.  Note that ctf_typemax is at this point one higher
     than we want to check against, because it's just been incremented for the
     addition of this type.  The pptrtab is lazily-updated as needed, so is not
     ...  */

  uint32_t type_idx = LCTF_TYPE_TO_INDEX (fp, type);
  uint32_t ref_idx = LCTF_TYPE_TO_INDEX (fp, ref);

  if (LCTF_TYPE_ISCHILD (fp, ref) == child
      && ref_idx < fp->ctf_typemax)
    fp->ctf_ptrtab[ref_idx] = type_idx;

ctf_add_slice (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref,
               const ctf_encoding_t *ep)
  ctf_id_t resolved_ref = ref;
  const ctf_type_t *tp;
  ctf_dict_t *tmp = fp;

    return (ctf_set_errno (fp, EINVAL));

  if ((ep->cte_bits > 255) || (ep->cte_offset > 255))
    return (ctf_set_errno (fp, ECTF_SLICEOVERFLOW));

  if (ref == CTF_ERR || ref > CTF_MAX_TYPE)
    return (ctf_set_errno (fp, EINVAL));

  if (ref != 0 && ((tp = ctf_lookup_by_id (&tmp, ref)) == NULL))
    return CTF_ERR; /* errno is set for us. */

  /* Make sure we ultimately point to an integral type.  We also allow slices to
     point to the unimplemented type, for now, because the compiler can emit
     such slices, though they're not very much use.  */

  resolved_ref = ctf_type_resolve_unsliced (tmp, ref);
  kind = ctf_type_kind_unsliced (tmp, resolved_ref);

  if ((kind != CTF_K_INTEGER) && (kind != CTF_K_FLOAT) &&
    return (ctf_set_errno (fp, ECTF_NOTINTFP));

  if ((type = ctf_add_generic (fp, flag, NULL, CTF_K_SLICE, &dtd)) == CTF_ERR)
    return CTF_ERR; /* errno is set for us. */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_SLICE, flag, 0);
  dtd->dtd_data.ctt_size = clp2 (P2ROUNDUP (ep->cte_bits, CHAR_BIT)
  dtd->dtd_u.dtu_slice.cts_type = (uint32_t) ref;
  dtd->dtd_u.dtu_slice.cts_bits = ep->cte_bits;
  dtd->dtd_u.dtu_slice.cts_offset = ep->cte_offset;
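/* Illustrative sketch (not from the original source): a bit-field is usually
   represented as a slice of an underlying integral type.  The names, widths
   and offsets here are placeholders.

     ctf_encoding_t int_enc = { CTF_INT_SIGNED, 0, 32 };
     ctf_encoding_t bf_enc = { CTF_INT_SIGNED, 3, 5 };   (5 bits at offset 3)

     ctf_id_t itype = ctf_add_integer (fp, CTF_ADD_ROOT, "int", &int_enc);
     ctf_id_t btype = ctf_add_slice (fp, CTF_ADD_NONROOT, itype, &bf_enc);  */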
ctf_add_integer (ctf_dict_t *fp, uint32_t flag,
                 const char *name, const ctf_encoding_t *ep)
  return (ctf_add_encoded (fp, flag, name, ep, CTF_K_INTEGER));

ctf_add_float (ctf_dict_t *fp, uint32_t flag,
               const char *name, const ctf_encoding_t *ep)
  return (ctf_add_encoded (fp, flag, name, ep, CTF_K_FLOAT));

ctf_add_pointer (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
  return (ctf_add_reftype (fp, flag, ref, CTF_K_POINTER));

ctf_add_array (ctf_dict_t *fp, uint32_t flag, const ctf_arinfo_t *arp)
  ctf_dict_t *tmp = fp;

    return (ctf_set_errno (fp, EINVAL));

  if (arp->ctr_contents != 0
      && ctf_lookup_by_id (&tmp, arp->ctr_contents) == NULL)
    return CTF_ERR; /* errno is set for us. */

  if (ctf_lookup_by_id (&tmp, arp->ctr_index) == NULL)
    return CTF_ERR; /* errno is set for us. */

  if (ctf_type_kind (fp, arp->ctr_index) == CTF_K_FORWARD)
      ctf_err_warn (fp, 1, ECTF_INCOMPLETE,
                    _("ctf_add_array: index type %lx is incomplete"),
      return (ctf_set_errno (fp, ECTF_INCOMPLETE));

  if ((type = ctf_add_generic (fp, flag, NULL, CTF_K_ARRAY, &dtd)) == CTF_ERR)
    return CTF_ERR; /* errno is set for us. */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_ARRAY, flag, 0);
  dtd->dtd_data.ctt_size = 0;
  dtd->dtd_u.dtu_arr = *arp;
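/* Illustrative sketch (not from the original source): ctf_arinfo_t names the
   element type, the index type and the element count.  "itype" stands for a
   previously added integer type.

     ctf_arinfo_t ar = { itype, itype, 16 };   (i.e. int [16])
     ctf_id_t atype = ctf_add_array (fp, CTF_ADD_ROOT, &ar);  */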
ctf_set_array (ctf_dict_t *fp, ctf_id_t type, const ctf_arinfo_t *arp)
  ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, type);

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

      || LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info) != CTF_K_ARRAY)
    return (ctf_set_errno (fp, ECTF_BADID));

  fp->ctf_flags |= LCTF_DIRTY;
  dtd->dtd_u.dtu_arr = *arp;

ctf_add_function (ctf_dict_t *fp, uint32_t flag,
                  const ctf_funcinfo_t *ctc, const ctf_id_t *argv)
  uint32_t *vdat = NULL;
  ctf_dict_t *tmp = fp;

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

  if (ctc == NULL || (ctc->ctc_flags & ~CTF_FUNC_VARARG) != 0
      || (ctc->ctc_argc != 0 && argv == NULL))
    return (ctf_set_errno (fp, EINVAL));

  vlen = ctc->ctc_argc;
  if (ctc->ctc_flags & CTF_FUNC_VARARG)
    vlen++; /* Add trailing zero to indicate varargs (see below). */

  if (ctc->ctc_return != 0
      && ctf_lookup_by_id (&tmp, ctc->ctc_return) == NULL)
    return CTF_ERR; /* errno is set for us. */

  if (vlen > CTF_MAX_VLEN)
    return (ctf_set_errno (fp, EOVERFLOW));

  if (vlen != 0 && (vdat = malloc (sizeof (ctf_id_t) * vlen)) == NULL)
    return (ctf_set_errno (fp, EAGAIN));

  for (i = 0; i < ctc->ctc_argc; i++)
      if (argv[i] != 0 && ctf_lookup_by_id (&tmp, argv[i]) == NULL)
          return CTF_ERR; /* errno is set for us. */
      vdat[i] = (uint32_t) argv[i];

  if ((type = ctf_add_generic (fp, flag, NULL, CTF_K_FUNCTION,
      return CTF_ERR; /* errno is set for us. */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_FUNCTION, flag, vlen);
  dtd->dtd_data.ctt_type = (uint32_t) ctc->ctc_return;

  if (ctc->ctc_flags & CTF_FUNC_VARARG)
    vdat[vlen - 1] = 0; /* Add trailing zero to indicate varargs. */
  dtd->dtd_u.dtu_argv = vdat;
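/* Illustrative sketch (not from the original source): adding the type of
   "int printf (const char *, ...)", assuming "itype" and "cptrtype" were
   previously added for int and const char *.

     ctf_id_t args[1] = { cptrtype };
     ctf_funcinfo_t fi;

     fi.ctc_return = itype;
     fi.ctc_argc = 1;
     fi.ctc_flags = CTF_FUNC_VARARG;

     ctf_id_t ftype = ctf_add_function (fp, CTF_ADD_ROOT, &fi, args);  */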
ctf_add_struct_sized (ctf_dict_t *fp, uint32_t flag, const char *name,

  /* Promote root-visible forwards to structs.  */

  type = ctf_lookup_by_rawname (fp, CTF_K_STRUCT, name);

  if (type != 0 && ctf_type_kind (fp, type) == CTF_K_FORWARD)
    dtd = ctf_dtd_lookup (fp, type);
  else if ((type = ctf_add_generic (fp, flag, name, CTF_K_STRUCT,
    return CTF_ERR; /* errno is set for us. */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_STRUCT, flag, 0);

  if (size > CTF_MAX_SIZE)
      dtd->dtd_data.ctt_size = CTF_LSIZE_SENT;
      dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (size);
      dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (size);
    dtd->dtd_data.ctt_size = (uint32_t) size;

ctf_add_struct (ctf_dict_t *fp, uint32_t flag, const char *name)
  return (ctf_add_struct_sized (fp, flag, name, 0));

ctf_add_union_sized (ctf_dict_t *fp, uint32_t flag, const char *name,

  /* Promote root-visible forwards to unions.  */

  type = ctf_lookup_by_rawname (fp, CTF_K_UNION, name);

  if (type != 0 && ctf_type_kind (fp, type) == CTF_K_FORWARD)
    dtd = ctf_dtd_lookup (fp, type);
  else if ((type = ctf_add_generic (fp, flag, name, CTF_K_UNION,
    return CTF_ERR; /* errno is set for us */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_UNION, flag, 0);

  if (size > CTF_MAX_SIZE)
      dtd->dtd_data.ctt_size = CTF_LSIZE_SENT;
      dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (size);
      dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (size);
    dtd->dtd_data.ctt_size = (uint32_t) size;

ctf_add_union (ctf_dict_t *fp, uint32_t flag, const char *name)
  return (ctf_add_union_sized (fp, flag, name, 0));

ctf_add_enum (ctf_dict_t *fp, uint32_t flag, const char *name)

  /* Promote root-visible forwards to enums.  */

  type = ctf_lookup_by_rawname (fp, CTF_K_ENUM, name);

  if (type != 0 && ctf_type_kind (fp, type) == CTF_K_FORWARD)
    dtd = ctf_dtd_lookup (fp, type);
  else if ((type = ctf_add_generic (fp, flag, name, CTF_K_ENUM,
    return CTF_ERR; /* errno is set for us. */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_ENUM, flag, 0);
  dtd->dtd_data.ctt_size = fp->ctf_dmodel->ctd_int;

ctf_add_enum_encoded (ctf_dict_t *fp, uint32_t flag, const char *name,
                      const ctf_encoding_t *ep)

  /* First, create the enum if need be, using most of the same machinery as
     ctf_add_enum(), to ensure that we do not allow things past that are not
     enums or forwards to them.  (This includes other slices: you cannot slice a
     slice, which would be a useless thing to do anyway.)  */

  type = ctf_lookup_by_rawname (fp, CTF_K_ENUM, name);

      if ((ctf_type_kind (fp, type) != CTF_K_FORWARD) &&
          (ctf_type_kind_unsliced (fp, type) != CTF_K_ENUM))
        return (ctf_set_errno (fp, ECTF_NOTINTFP));
  else if ((type = ctf_add_enum (fp, flag, name)) == CTF_ERR)
    return CTF_ERR; /* errno is set for us. */

  /* Now attach a suitable slice to it.  */

  return ctf_add_slice (fp, flag, type, ep);

ctf_add_forward (ctf_dict_t *fp, uint32_t flag, const char *name,

  if (!ctf_forwardable_kind (kind))
    return (ctf_set_errno (fp, ECTF_NOTSUE));

  if (name == NULL || name[0] == '\0')
    return (ctf_set_errno (fp, ECTF_NONAME));

  /* If the type is already defined or exists as a forward tag, just
     return the ctf_id_t of the existing definition.  */

  type = ctf_lookup_by_rawname (fp, kind, name);

  if ((type = ctf_add_generic (fp, flag, name, kind, &dtd)) == CTF_ERR)
    return CTF_ERR; /* errno is set for us. */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_FORWARD, flag, 0);
  dtd->dtd_data.ctt_type = kind;

ctf_add_typedef (ctf_dict_t *fp, uint32_t flag, const char *name,
  ctf_dict_t *tmp = fp;

  if (ref == CTF_ERR || ref > CTF_MAX_TYPE)
    return (ctf_set_errno (fp, EINVAL));

  if (name == NULL || name[0] == '\0')
    return (ctf_set_errno (fp, ECTF_NONAME));

  if (ref != 0 && ctf_lookup_by_id (&tmp, ref) == NULL)
    return CTF_ERR; /* errno is set for us. */

  if ((type = ctf_add_generic (fp, flag, name, CTF_K_TYPEDEF,
    return CTF_ERR; /* errno is set for us. */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_TYPEDEF, flag, 0);
  dtd->dtd_data.ctt_type = (uint32_t) ref;

ctf_add_volatile (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
  return (ctf_add_reftype (fp, flag, ref, CTF_K_VOLATILE));

ctf_add_const (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
  return (ctf_add_reftype (fp, flag, ref, CTF_K_CONST));

ctf_add_restrict (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
  return (ctf_add_reftype (fp, flag, ref, CTF_K_RESTRICT));

ctf_add_enumerator (ctf_dict_t *fp, ctf_id_t enid, const char *name,
  ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, enid);
  uint32_t kind, vlen, root;

    return (ctf_set_errno (fp, EINVAL));

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

    return (ctf_set_errno (fp, ECTF_BADID));

  kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
  root = LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info);
  vlen = LCTF_INFO_VLEN (fp, dtd->dtd_data.ctt_info);

  if (kind != CTF_K_ENUM)
    return (ctf_set_errno (fp, ECTF_NOTENUM));

  if (vlen == CTF_MAX_VLEN)
    return (ctf_set_errno (fp, ECTF_DTFULL));

  for (dmd = ctf_list_next (&dtd->dtd_u.dtu_members);
       dmd != NULL; dmd = ctf_list_next (dmd))
      if (strcmp (dmd->dmd_name, name) == 0)
        return (ctf_set_errno (fp, ECTF_DUPLICATE));

  if ((dmd = malloc (sizeof (ctf_dmdef_t))) == NULL)
    return (ctf_set_errno (fp, EAGAIN));

  if ((s = strdup (name)) == NULL)
      return (ctf_set_errno (fp, EAGAIN));

  dmd->dmd_type = CTF_ERR;
  dmd->dmd_value = value;

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, root, vlen + 1);
  ctf_list_append (&dtd->dtd_u.dtu_members, dmd);

  fp->ctf_flags |= LCTF_DIRTY;
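/* Illustrative sketch (not from the original source): building a small enum
   with three constants.  The names and values are placeholders.

     ctf_id_t etype = ctf_add_enum (fp, CTF_ADD_ROOT, "colour");

     ctf_add_enumerator (fp, etype, "RED", 0);
     ctf_add_enumerator (fp, etype, "GREEN", 1);
     ctf_add_enumerator (fp, etype, "BLUE", 2);  */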
ctf_add_member_offset (ctf_dict_t *fp, ctf_id_t souid, const char *name,
                       ctf_id_t type, unsigned long bit_offset)
  ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, souid);
  ssize_t msize, malign, ssize;
  uint32_t kind, vlen, root;
  int is_incomplete = 0;

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

    return (ctf_set_errno (fp, ECTF_BADID));

  if (name != NULL && name[0] == '\0')

  kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
  root = LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info);
  vlen = LCTF_INFO_VLEN (fp, dtd->dtd_data.ctt_info);

  if (kind != CTF_K_STRUCT && kind != CTF_K_UNION)
    return (ctf_set_errno (fp, ECTF_NOTSOU));

  if (vlen == CTF_MAX_VLEN)
    return (ctf_set_errno (fp, ECTF_DTFULL));

      for (dmd = ctf_list_next (&dtd->dtd_u.dtu_members);
           dmd != NULL; dmd = ctf_list_next (dmd))
          if (dmd->dmd_name != NULL && strcmp (dmd->dmd_name, name) == 0)
            return (ctf_set_errno (fp, ECTF_DUPLICATE));

  if ((msize = ctf_type_size (fp, type)) < 0 ||
      (malign = ctf_type_align (fp, type)) < 0)
      /* The unimplemented type, and any type that resolves to it, has no size
         and no alignment: it can correspond to any number of compiler-inserted
         types.  We allow incomplete types through since they are routinely
         added to the ends of structures, and can even be added elsewhere in
         structures by the deduplicator.  They are assumed to be zero-size with
         no alignment: this is often wrong, but problems can be avoided in this
         case by explicitly specifying the size of the structure via the _sized
         functions.  The deduplicator always does this.  */

      if (ctf_errno (fp) == ECTF_NONREPRESENTABLE)
        ctf_set_errno (fp, 0);
      else if (ctf_errno (fp) == ECTF_INCOMPLETE)
        return -1; /* errno is set for us. */

  if ((dmd = malloc (sizeof (ctf_dmdef_t))) == NULL)
    return (ctf_set_errno (fp, EAGAIN));

  if (name != NULL && (s = strdup (name)) == NULL)
      return (ctf_set_errno (fp, EAGAIN));

  dmd->dmd_type = type;
  dmd->dmd_value = -1;

  if (kind == CTF_K_STRUCT && vlen != 0)
      if (bit_offset == (unsigned long) - 1)
          /* Natural alignment.  */

          ctf_dmdef_t *lmd = ctf_list_prev (&dtd->dtd_u.dtu_members);
          ctf_id_t ltype = ctf_type_resolve (fp, lmd->dmd_type);
          size_t off = lmd->dmd_offset;
          ctf_encoding_t linfo;

          /* Propagate any error from ctf_type_resolve.  If the last member was
             of unimplemented type, this may be -ECTF_NONREPRESENTABLE: we
             cannot insert right after such a member without explicit offset
             specification, because its alignment and size is not known.  */
          if (ltype == CTF_ERR)
              return -1; /* errno is set for us. */

              ctf_err_warn (fp, 1, ECTF_INCOMPLETE,
                            _("ctf_add_member_offset: cannot add member %s of "
                              "incomplete type %lx to struct %lx without "
                              "specifying explicit offset\n"),
                            name ? name : _("(unnamed member)"), type, souid);
              return (ctf_set_errno (fp, ECTF_INCOMPLETE));

          if (ctf_type_encoding (fp, ltype, &linfo) == 0)
            off += linfo.cte_bits;
          else if ((lsize = ctf_type_size (fp, ltype)) > 0)
            off += lsize * CHAR_BIT;
          else if (lsize == -1 && ctf_errno (fp) == ECTF_INCOMPLETE)
              ctf_err_warn (fp, 1, ECTF_INCOMPLETE,
                            _("ctf_add_member_offset: cannot add member %s of "
                              "type %lx to struct %lx without specifying "
                              "explicit offset after member %s of type %lx, "
                              "which is an incomplete type\n"),
                            name ? name : _("(unnamed member)"), type, souid,
                            lmd->dmd_name ? lmd->dmd_name
                            : _("(unnamed member)"), ltype);
              return -1; /* errno is set for us. */

          /* Round up the offset of the end of the last member to
             the next byte boundary, convert 'off' to bytes, and
             then round it up again to the next multiple of the
             alignment required by the new member.  Finally,
             convert back to bits and store the result in
             dmd_offset.  Technically we could do more efficient
             packing if the new member is a bit-field, but we're
             the "compiler" and ANSI says we can do as we choose.  */
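          /* Worked example (not from the original source): if the previous
             member ends at bit offset 37 and the new member has malign == 4
             and msize == 4, then roundup (37, 8) / 8 == 5 bytes,
             roundup (5, 4) == 8 bytes, so dmd_offset becomes 64 bits and
             ssize becomes 12 bytes.  */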
          off = roundup (off, CHAR_BIT) / CHAR_BIT;
          off = roundup (off, MAX (malign, 1));
          dmd->dmd_offset = off * CHAR_BIT;
          ssize = off + msize;

          /* Specified offset in bits.  */

          dmd->dmd_offset = bit_offset;
          ssize = ctf_get_ctt_size (fp, &dtd->dtd_data, NULL, NULL);
          ssize = MAX (ssize, ((signed) bit_offset / CHAR_BIT) + msize);

      dmd->dmd_offset = 0;
      ssize = ctf_get_ctt_size (fp, &dtd->dtd_data, NULL, NULL);
      ssize = MAX (ssize, msize);

  if ((size_t) ssize > CTF_MAX_SIZE)
      dtd->dtd_data.ctt_size = CTF_LSIZE_SENT;
      dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (ssize);
      dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (ssize);
    dtd->dtd_data.ctt_size = (uint32_t) ssize;

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, root, vlen + 1);
  ctf_list_append (&dtd->dtd_u.dtu_members, dmd);

  fp->ctf_flags |= LCTF_DIRTY;

ctf_add_member_encoded (ctf_dict_t *fp, ctf_id_t souid, const char *name,
                        ctf_id_t type, unsigned long bit_offset,
                        const ctf_encoding_t encoding)
  ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, type);
  int kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);

  if ((kind != CTF_K_INTEGER) && (kind != CTF_K_FLOAT) && (kind != CTF_K_ENUM))
    return (ctf_set_errno (fp, ECTF_NOTINTFP));

  if ((type = ctf_add_slice (fp, CTF_ADD_NONROOT, otype, &encoding)) == CTF_ERR)
    return -1; /* errno is set for us. */

  return ctf_add_member_offset (fp, souid, name, type, bit_offset);

ctf_add_member (ctf_dict_t *fp, ctf_id_t souid, const char *name,
  return ctf_add_member_offset (fp, souid, name, type, (unsigned long) - 1);
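/* Illustrative sketch (not from the original source): building
   "struct link { struct link *next; int n; }" with naturally aligned members.
   "itype" stands for a previously added int type.

     ctf_id_t stype = ctf_add_struct (fp, CTF_ADD_ROOT, "link");
     ctf_id_t ptype = ctf_add_pointer (fp, CTF_ADD_NONROOT, stype);

     ctf_add_member (fp, stype, "next", ptype);
     ctf_add_member (fp, stype, "n", itype);  */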
ctf_add_variable (ctf_dict_t *fp, const char *name, ctf_id_t ref)
  ctf_dict_t *tmp = fp;

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

  if (ctf_dvd_lookup (fp, name) != NULL)
    return (ctf_set_errno (fp, ECTF_DUPLICATE));

  if (ctf_lookup_by_id (&tmp, ref) == NULL)
    return -1; /* errno is set for us. */

  /* Make sure this type is representable.  */
  if ((ctf_type_resolve (fp, ref) == CTF_ERR)
      && (ctf_errno (fp) == ECTF_NONREPRESENTABLE))

  if ((dvd = malloc (sizeof (ctf_dvdef_t))) == NULL)
    return (ctf_set_errno (fp, EAGAIN));

  if (name != NULL && (dvd->dvd_name = strdup (name)) == NULL)
      return (ctf_set_errno (fp, EAGAIN));

  dvd->dvd_type = ref;
  dvd->dvd_snapshots = fp->ctf_snapshots;

  if (ctf_dvd_insert (fp, dvd) < 0)
      free (dvd->dvd_name);
      return -1; /* errno is set for us. */

  fp->ctf_flags |= LCTF_DIRTY;
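/* Illustrative sketch (not from the original source): recording a named
   variable and the corresponding data symbol, where "itype" is a previously
   added type.

     ctf_add_variable (fp, "my_counter", itype);
     ctf_add_objt_sym (fp, "my_counter", itype);  */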
ctf_add_funcobjt_sym (ctf_dict_t *fp, int is_function, const char *name, ctf_id_t id)
  ctf_dict_t *tmp = fp;
  ctf_dynhash_t *h = is_function ? fp->ctf_funchash : fp->ctf_objthash;

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

  if (ctf_dynhash_lookup (fp->ctf_objthash, name) != NULL ||
      ctf_dynhash_lookup (fp->ctf_funchash, name) != NULL)
    return (ctf_set_errno (fp, ECTF_DUPLICATE));

  if (ctf_lookup_by_id (&tmp, id) == NULL)
    return -1; /* errno is set for us. */

  if (is_function && ctf_type_kind (fp, id) != CTF_K_FUNCTION)
    return (ctf_set_errno (fp, ECTF_NOTFUNC));

  if ((dupname = strdup (name)) == NULL)
    return (ctf_set_errno (fp, ENOMEM));

  if (ctf_dynhash_insert (h, dupname, (void *) (uintptr_t) id) < 0)
      return (ctf_set_errno (fp, ENOMEM));

ctf_add_objt_sym (ctf_dict_t *fp, const char *name, ctf_id_t id)
  return (ctf_add_funcobjt_sym (fp, 0, name, id));

ctf_add_func_sym (ctf_dict_t *fp, const char *name, ctf_id_t id)
  return (ctf_add_funcobjt_sym (fp, 1, name, id));

typedef struct ctf_bundle
  ctf_dict_t *ctb_dict;		/* CTF dict handle.  */
  ctf_id_t ctb_type;		/* CTF type identifier.  */
  ctf_dtdef_t *ctb_dtd;		/* CTF dynamic type definition (if any).  */

enumcmp (const char *name, int value, void *arg)
  ctf_bundle_t *ctb = arg;

  if (ctf_enum_value (ctb->ctb_dict, ctb->ctb_type, name, &bvalue) < 0)
      ctf_err_warn (ctb->ctb_dict, 0, 0,
                    _("conflict due to enum %s iteration error"), name);
  if (value != bvalue)
      ctf_err_warn (ctb->ctb_dict, 1, ECTF_CONFLICT,
                    _("conflict due to enum value change: %i versus %i"),

enumadd (const char *name, int value, void *arg)
  ctf_bundle_t *ctb = arg;

  return (ctf_add_enumerator (ctb->ctb_dict, ctb->ctb_type,

membcmp (const char *name, ctf_id_t type _libctf_unused_, unsigned long offset,
  ctf_bundle_t *ctb = arg;

  /* Don't check nameless members (e.g. anonymous structs/unions) against each
     ...  */

  if (ctf_member_info (ctb->ctb_dict, ctb->ctb_type, name, &ctm) < 0)
      ctf_err_warn (ctb->ctb_dict, 0, 0,
                    _("conflict due to struct member %s iteration error"),
  if (ctm.ctm_offset != offset)
      ctf_err_warn (ctb->ctb_dict, 1, ECTF_CONFLICT,
                    _("conflict due to struct member %s offset change: "
                    name, ctm.ctm_offset, offset);

membadd (const char *name, ctf_id_t type, unsigned long offset, void *arg)
  ctf_bundle_t *ctb = arg;

  if ((dmd = malloc (sizeof (ctf_dmdef_t))) == NULL)
    return (ctf_set_errno (ctb->ctb_dict, EAGAIN));

  /* Unnamed members in non-dynamic dicts have a name of "", while dynamic dicts
     ...  */

  if (name != NULL && (s = strdup (name)) == NULL)
      return (ctf_set_errno (ctb->ctb_dict, EAGAIN));

  /* For now, dmd_type is copied as the src_fp's type; it is reset to an
     equivalent dst_fp type by a final loop in ctf_add_type(), below.  */

  dmd->dmd_type = type;
  dmd->dmd_offset = offset;
  dmd->dmd_value = -1;

  ctf_list_append (&ctb->ctb_dtd->dtd_u.dtu_members, dmd);

  ctb->ctb_dict->ctf_flags |= LCTF_DIRTY;

/* Record the correspondence between a source and ctf_add_type()-added
   destination type: both types are translated into parent type IDs if need be,
   so they relate to the actual dictionary they are in.  Outside controlled
   circumstances (like linking) it is probably not useful to do more than
   compare these pointers, since there is nothing stopping the user closing the
   source dict whenever they want to.

   Our OOM handling here is just to not do anything, because this is called deep
   enough in the call stack that doing anything useful is painfully difficult:
   the worst consequence if we do OOM is a bit of type duplication anyway.  */

ctf_add_type_mapping (ctf_dict_t *src_fp, ctf_id_t src_type,
                      ctf_dict_t *dst_fp, ctf_id_t dst_type)
  if (LCTF_TYPE_ISPARENT (src_fp, src_type) && src_fp->ctf_parent)
    src_fp = src_fp->ctf_parent;

  src_type = LCTF_TYPE_TO_INDEX(src_fp, src_type);

  if (LCTF_TYPE_ISPARENT (dst_fp, dst_type) && dst_fp->ctf_parent)
    dst_fp = dst_fp->ctf_parent;

  dst_type = LCTF_TYPE_TO_INDEX(dst_fp, dst_type);

  if (dst_fp->ctf_link_type_mapping == NULL)
      ctf_hash_fun f = ctf_hash_type_key;
      ctf_hash_eq_fun e = ctf_hash_eq_type_key;

      if ((dst_fp->ctf_link_type_mapping = ctf_dynhash_create (f, e, free,

  ctf_link_type_key_t *key;
  key = calloc (1, sizeof (struct ctf_link_type_key));

  key->cltk_fp = src_fp;
  key->cltk_idx = src_type;

  /* No OOM checking needed, because if this doesn't work the worst we'll do is
     add a few more duplicate types (which will probably run out of memory
     ...  */
  ctf_dynhash_insert (dst_fp->ctf_link_type_mapping, key,
                      (void *) (uintptr_t) dst_type);

/* Look up a type mapping: return 0 if none.  The DST_FP is modified to point to
   the parent if need be.  The ID returned is from the dst_fp's perspective.  */
ctf_type_mapping (ctf_dict_t *src_fp, ctf_id_t src_type, ctf_dict_t **dst_fp)
  ctf_link_type_key_t key;
  ctf_dict_t *target_fp = *dst_fp;
  ctf_id_t dst_type = 0;

  if (LCTF_TYPE_ISPARENT (src_fp, src_type) && src_fp->ctf_parent)
    src_fp = src_fp->ctf_parent;

  src_type = LCTF_TYPE_TO_INDEX(src_fp, src_type);
  key.cltk_fp = src_fp;
  key.cltk_idx = src_type;

  if (target_fp->ctf_link_type_mapping)
    dst_type = (uintptr_t) ctf_dynhash_lookup (target_fp->ctf_link_type_mapping,

      dst_type = LCTF_INDEX_TO_TYPE (target_fp, dst_type,
                                     target_fp->ctf_parent != NULL);
      *dst_fp = target_fp;

  if (target_fp->ctf_parent)
    target_fp = target_fp->ctf_parent;

  if (target_fp->ctf_link_type_mapping)
    dst_type = (uintptr_t) ctf_dynhash_lookup (target_fp->ctf_link_type_mapping,

      dst_type = LCTF_INDEX_TO_TYPE (target_fp, dst_type,
                                     target_fp->ctf_parent != NULL);

  *dst_fp = target_fp;

/* The ctf_add_type routine is used to copy a type from a source CTF dictionary
   to a dynamic destination dictionary.  This routine operates recursively by
   following the source type's links and embedded member types.  If the
   destination dict already contains a named type which has the same attributes,
   then we succeed and return this type but no changes occur.  */

ctf_add_type_internal (ctf_dict_t *dst_fp, ctf_dict_t *src_fp, ctf_id_t src_type,
                       ctf_dict_t *proc_tracking_fp)
  ctf_id_t dst_type = CTF_ERR;
  uint32_t dst_kind = CTF_K_UNKNOWN;
  ctf_dict_t *tmp_fp = dst_fp;
  uint32_t kind, forward_kind, flag, vlen;
  const ctf_type_t *src_tp, *dst_tp;
  ctf_bundle_t src, dst;
  ctf_encoding_t src_en, dst_en;
  ctf_arinfo_t src_ar, dst_ar;
  ctf_id_t orig_src_type = src_type;

  if (!(dst_fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (dst_fp, ECTF_RDONLY));

  if ((src_tp = ctf_lookup_by_id (&src_fp, src_type)) == NULL)
    return (ctf_set_errno (dst_fp, ctf_errno (src_fp)));

  if ((ctf_type_resolve (src_fp, src_type) == CTF_ERR)
      && (ctf_errno (src_fp) == ECTF_NONREPRESENTABLE))
    return (ctf_set_errno (dst_fp, ECTF_NONREPRESENTABLE));

  name = ctf_strptr (src_fp, src_tp->ctt_name);
  kind = LCTF_INFO_KIND (src_fp, src_tp->ctt_info);
  flag = LCTF_INFO_ISROOT (src_fp, src_tp->ctt_info);
  vlen = LCTF_INFO_VLEN (src_fp, src_tp->ctt_info);

  /* If this is a type we are currently in the middle of adding, hand it
     straight back.  (This lets us handle self-referential structures without
     considering forwards and empty structures the same as their completed
     ...  */

  tmp = ctf_type_mapping (src_fp, src_type, &tmp_fp);

      if (ctf_dynhash_lookup (proc_tracking_fp->ctf_add_processing,
                              (void *) (uintptr_t) src_type))

      /* If this type has already been added from this dictionary, and is the
         same kind and (if a struct or union) has the same number of members,
         hand it straight back.  */

      if (ctf_type_kind_unsliced (tmp_fp, tmp) == (int) kind)
          if (kind == CTF_K_STRUCT || kind == CTF_K_UNION
              || kind == CTF_K_ENUM)
              if ((dst_tp = ctf_lookup_by_id (&tmp_fp, dst_type)) != NULL)
                if (vlen == LCTF_INFO_VLEN (tmp_fp, dst_tp->ctt_info))

  forward_kind = kind;
  if (kind == CTF_K_FORWARD)
    forward_kind = src_tp->ctt_type;

  /* If the source type has a name and is a root type (visible at the top-level
     scope), lookup the name in the destination dictionary and verify that it is
     of the same kind before we do anything else.  */

  if ((flag & CTF_ADD_ROOT) && name[0] != '\0'
      && (tmp = ctf_lookup_by_rawname (dst_fp, forward_kind, name)) != 0)
      dst_kind = ctf_type_kind_unsliced (dst_fp, dst_type);

  /* If an identically named dst_type exists, fail with ECTF_CONFLICT
     unless dst_type is a forward declaration and src_type is a struct,
     union, or enum (i.e. the definition of the previous forward decl).

     We also allow addition in the opposite order (addition of a forward when a
     struct, union, or enum already exists), which is a NOP and returns the
     already-present struct, union, or enum.  */

  if (dst_type != CTF_ERR && dst_kind != kind)
      if (kind == CTF_K_FORWARD
          && (dst_kind == CTF_K_ENUM || dst_kind == CTF_K_STRUCT
              || dst_kind == CTF_K_UNION))
          ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);

      if (dst_kind != CTF_K_FORWARD
          || (kind != CTF_K_ENUM && kind != CTF_K_STRUCT
              && kind != CTF_K_UNION))
          ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
                        _("ctf_add_type: conflict for type %s: "
                          "kinds differ, new: %i; old (ID %lx): %i"),
                        name, kind, dst_type, dst_kind);
          return (ctf_set_errno (dst_fp, ECTF_CONFLICT));

  /* We take special action for an integer, float, or slice since it is
     described not only by its name but also its encoding.  For integers,
     bit-fields exploit this degeneracy.  */

  if (kind == CTF_K_INTEGER || kind == CTF_K_FLOAT || kind == CTF_K_SLICE)
      if (ctf_type_encoding (src_fp, src_type, &src_en) != 0)
        return (ctf_set_errno (dst_fp, ctf_errno (src_fp)));

      if (dst_type != CTF_ERR)
          ctf_dict_t *fp = dst_fp;

          if ((dst_tp = ctf_lookup_by_id (&fp, dst_type)) == NULL)

          if (ctf_type_encoding (dst_fp, dst_type, &dst_en) != 0)
            return CTF_ERR; /* errno set for us. */

          if (LCTF_INFO_ISROOT (fp, dst_tp->ctt_info) & CTF_ADD_ROOT)
              /* The type that we found in the hash is also root-visible.  If
                 the two types match then use the existing one; otherwise,
                 declare a conflict.  Note: slices are not certain to match
                 even if there is no conflict: we must check the contained type
                 ...  */

              if (memcmp (&src_en, &dst_en, sizeof (ctf_encoding_t)) == 0)
                  if (kind != CTF_K_SLICE)
                      ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);

              return (ctf_set_errno (dst_fp, ECTF_CONFLICT));

              /* We found a non-root-visible type in the hash.  If its encoding
                 is the same, we can reuse it, unless it is a slice.  */

              if (memcmp (&src_en, &dst_en, sizeof (ctf_encoding_t)) == 0)
                  if (kind != CTF_K_SLICE)
                      ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);

  src.ctb_dict = src_fp;
  src.ctb_type = src_type;
  dst.ctb_dict = dst_fp;
  dst.ctb_type = dst_type;

  /* Now perform kind-specific processing.  If dst_type is CTF_ERR, then we add
     a new type with the same properties as src_type to dst_fp.  If dst_type is
     not CTF_ERR, then we verify that dst_type has the same attributes as
     src_type.  We recurse for embedded references.  Before we start, we note
     that we are processing this type, to prevent infinite recursion: we do not
     re-process any type that appears in this list.  The list is emptied
     wholesale at the end of processing everything in this recursive stack.  */
  if (ctf_dynhash_insert (proc_tracking_fp->ctf_add_processing,
                          (void *) (uintptr_t) src_type, (void *) 1) < 0)
    return ctf_set_errno (dst_fp, ENOMEM);

      /* If we found a match we will have either returned it or declared a
         ...  */
      dst_type = ctf_add_integer (dst_fp, flag, name, &src_en);

      /* If we found a match we will have either returned it or declared a
         ...  */
      dst_type = ctf_add_float (dst_fp, flag, name, &src_en);

      /* We have checked for conflicting encodings: now try to add the
         ...  */
      src_type = ctf_type_reference (src_fp, src_type);
      src_type = ctf_add_type_internal (dst_fp, src_fp, src_type,

      if (src_type == CTF_ERR)
        return CTF_ERR; /* errno is set for us. */

      dst_type = ctf_add_slice (dst_fp, flag, src_type, &src_en);

    case CTF_K_VOLATILE:
    case CTF_K_RESTRICT:
      src_type = ctf_type_reference (src_fp, src_type);
      src_type = ctf_add_type_internal (dst_fp, src_fp, src_type,

      if (src_type == CTF_ERR)
        return CTF_ERR; /* errno is set for us. */

      dst_type = ctf_add_reftype (dst_fp, flag, src_type, kind);

      if (ctf_array_info (src_fp, src_type, &src_ar) != 0)
        return (ctf_set_errno (dst_fp, ctf_errno (src_fp)));

      src_ar.ctr_contents =
        ctf_add_type_internal (dst_fp, src_fp, src_ar.ctr_contents,
      src_ar.ctr_index = ctf_add_type_internal (dst_fp, src_fp,
      src_ar.ctr_nelems = src_ar.ctr_nelems;

      if (src_ar.ctr_contents == CTF_ERR || src_ar.ctr_index == CTF_ERR)
        return CTF_ERR; /* errno is set for us. */

      if (dst_type != CTF_ERR)
          if (ctf_array_info (dst_fp, dst_type, &dst_ar) != 0)
            return CTF_ERR; /* errno is set for us. */

          if (memcmp (&src_ar, &dst_ar, sizeof (ctf_arinfo_t)))
              ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
                            _("conflict for type %s against ID %lx: array info "
                              "differs, old %lx/%lx/%x; new: %lx/%lx/%x"),
                            name, dst_type, src_ar.ctr_contents,
                            src_ar.ctr_index, src_ar.ctr_nelems,
                            dst_ar.ctr_contents, dst_ar.ctr_index,
              return (ctf_set_errno (dst_fp, ECTF_CONFLICT));

        dst_type = ctf_add_array (dst_fp, flag, &src_ar);

    case CTF_K_FUNCTION:
      ctc.ctc_return = ctf_add_type_internal (dst_fp, src_fp,

      if (ctc.ctc_return == CTF_ERR)
        return CTF_ERR; /* errno is set for us. */

      dst_type = ctf_add_function (dst_fp, flag, &ctc, NULL);

      /* Technically to match a struct or union we need to check both
         ways (src members vs. dst, dst members vs. src) but we make
         this more optimal by only checking src vs. dst and comparing
         the total size of the structure (which we must do anyway)
         which covers the possibility of dst members not in src.
         This optimization can be defeated for unions, but is so
         pathological as to render it irrelevant for our purposes.  */

      if (dst_type != CTF_ERR && kind != CTF_K_FORWARD
          && dst_kind != CTF_K_FORWARD)
          if (ctf_type_size (src_fp, src_type) !=
              ctf_type_size (dst_fp, dst_type))
              ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
                            _("conflict for type %s against ID %lx: union "
                              "size differs, old %li, new %li"), name,
                            dst_type, (long) ctf_type_size (src_fp, src_type),
                            (long) ctf_type_size (dst_fp, dst_type));
              return (ctf_set_errno (dst_fp, ECTF_CONFLICT));

          if (ctf_member_iter (src_fp, src_type, membcmp, &dst))
              ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
                            _("conflict for type %s against ID %lx: members "
                              "differ, see above"), name, dst_type);
              return (ctf_set_errno (dst_fp, ECTF_CONFLICT));

          /* Unlike the other cases, copying structs and unions is done
             manually so as to avoid repeated lookups in ctf_add_member
             and to ensure the exact same member offsets as in src_type.  */

          dst_type = ctf_add_generic (dst_fp, flag, name, kind, &dtd);
          if (dst_type == CTF_ERR)
            return CTF_ERR; /* errno is set for us. */

          dst.ctb_type = dst_type;

          /* Pre-emptively add this struct to the type mapping so that
             structures that refer to themselves work.  */
          ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);

          if (ctf_member_iter (src_fp, src_type, membadd, &dst) != 0)
            errs++; /* Increment errs and fail at bottom of case. */

          if ((ssize = ctf_type_size (src_fp, src_type)) < 0)
            return CTF_ERR; /* errno is set for us. */

          size = (size_t) ssize;
          if (size > CTF_MAX_SIZE)
              dtd->dtd_data.ctt_size = CTF_LSIZE_SENT;
              dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (size);
              dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (size);
            dtd->dtd_data.ctt_size = (uint32_t) size;

          dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, flag, vlen);

          /* Make a final pass through the members changing each dmd_type (a
             src_fp type) to an equivalent type in dst_fp.  We pass through all
             members, leaving any that fail set to CTF_ERR, unless they fail
             because they are marking a member of type not representable in this
             version of CTF, in which case we just want to silently omit them:
             no consumer can do anything with them anyway.  */
          for (dmd = ctf_list_next (&dtd->dtd_u.dtu_members);
               dmd != NULL; dmd = ctf_list_next (dmd))
              ctf_dict_t *dst = dst_fp;

              memb_type = ctf_type_mapping (src_fp, dmd->dmd_type, &dst);

                  if ((dmd->dmd_type =
                       ctf_add_type_internal (dst_fp, src_fp, dmd->dmd_type,
                                              proc_tracking_fp)) == CTF_ERR)
                      if (ctf_errno (dst_fp) != ECTF_NONREPRESENTABLE)
                dmd->dmd_type = memb_type;

            return CTF_ERR; /* errno is set for us. */

      if (dst_type != CTF_ERR && kind != CTF_K_FORWARD
          && dst_kind != CTF_K_FORWARD)
          if (ctf_enum_iter (src_fp, src_type, enumcmp, &dst)
              || ctf_enum_iter (dst_fp, dst_type, enumcmp, &src))
              ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
                            _("conflict for enum %s against ID %lx: members "
                              "differ, see above"), name, dst_type);
              return (ctf_set_errno (dst_fp, ECTF_CONFLICT));

          dst_type = ctf_add_enum (dst_fp, flag, name);
          if ((dst.ctb_type = dst_type) == CTF_ERR
              || ctf_enum_iter (src_fp, src_type, enumadd, &dst))
            return CTF_ERR; /* errno is set for us */

      if (dst_type == CTF_ERR)
        dst_type = ctf_add_forward (dst_fp, flag, name, forward_kind);

      src_type = ctf_type_reference (src_fp, src_type);
      src_type = ctf_add_type_internal (dst_fp, src_fp, src_type,

      if (src_type == CTF_ERR)
        return CTF_ERR; /* errno is set for us. */

      /* If dst_type is not CTF_ERR at this point, we should check if
         ctf_type_reference(dst_fp, dst_type) != src_type and if so fail with
         ECTF_CONFLICT.  However, this causes problems with bitness typedefs
         that vary based on things like if 32-bit then pid_t is int otherwise
         long.  We therefore omit this check and assume that if the identically
         named typedef already exists in dst_fp, it is correct or
         ...  */

      if (dst_type == CTF_ERR)
        dst_type = ctf_add_typedef (dst_fp, flag, name, src_type);

      return (ctf_set_errno (dst_fp, ECTF_CORRUPT));

  if (dst_type != CTF_ERR)
    ctf_add_type_mapping (src_fp, orig_src_type, dst_fp, dst_type);

ctf_add_type (ctf_dict_t *dst_fp, ctf_dict_t *src_fp, ctf_id_t src_type)

  if (!src_fp->ctf_add_processing)
    src_fp->ctf_add_processing = ctf_dynhash_create (ctf_hash_integer,
                                                     ctf_hash_eq_integer,

  /* We store the hash on the source, because it contains only source type IDs:
     but callers will invariably expect errors to appear on the dest.  */
  if (!src_fp->ctf_add_processing)
    return (ctf_set_errno (dst_fp, ENOMEM));

  id = ctf_add_type_internal (dst_fp, src_fp, src_type, src_fp);
  ctf_dynhash_empty (src_fp->ctf_add_processing);
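/* Illustrative sketch (not from the original source): copying one type (and,
   recursively, everything it references) from a read-only source dict into a
   writable destination dict.

     ctf_id_t copied = ctf_add_type (dst_fp, src_fp, src_type);

     if (copied == CTF_ERR)
       fprintf (stderr, "ctf_add_type: %s\n",
                ctf_errmsg (ctf_errno (dst_fp)));  */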