X-Git-Url: https://repo.jachan.dev/binutils.git/blobdiff_plain/77648241384a5c46f059efeb157a2887116d844a..HEAD:/libctf/ctf-hash.c

diff --git a/libctf/ctf-hash.c b/libctf/ctf-hash.c
index 7eba494a51..94bd5b62ec 100644
--- a/libctf/ctf-hash.c
+++ b/libctf/ctf-hash.c
@@ -1,5 +1,5 @@
 /* Interface to hashtable implementations.
-   Copyright (C) 2006-2020 Free Software Foundation, Inc.
+   Copyright (C) 2006-2022 Free Software Foundation, Inc.
 
    This file is part of libctf.
 
@@ -94,36 +94,50 @@ ctf_hash_eq_string (const void *a, const void *b)
   return !strcmp((const char *) hep_a->key, (const char *) hep_b->key);
 }
 
-/* Hash a type_mapping_key.  */
+/* Hash a type_key.  */
 unsigned int
-ctf_hash_type_mapping_key (const void *ptr)
+ctf_hash_type_key (const void *ptr)
 {
   ctf_helem_t *hep = (ctf_helem_t *) ptr;
-  ctf_link_type_mapping_key_t *k = (ctf_link_type_mapping_key_t *) hep->key;
+  ctf_link_type_key_t *k = (ctf_link_type_key_t *) hep->key;
 
-  return htab_hash_pointer (k->cltm_fp) + 59 * htab_hash_pointer ((void *) k->cltm_idx);
+  return htab_hash_pointer (k->cltk_fp) + 59
+    * htab_hash_pointer ((void *) (uintptr_t) k->cltk_idx);
 }
 
 int
-ctf_hash_eq_type_mapping_key (const void *a, const void *b)
+ctf_hash_eq_type_key (const void *a, const void *b)
 {
   ctf_helem_t *hep_a = (ctf_helem_t *) a;
   ctf_helem_t *hep_b = (ctf_helem_t *) b;
-  ctf_link_type_mapping_key_t *key_a = (ctf_link_type_mapping_key_t *) hep_a->key;
-  ctf_link_type_mapping_key_t *key_b = (ctf_link_type_mapping_key_t *) hep_b->key;
+  ctf_link_type_key_t *key_a = (ctf_link_type_key_t *) hep_a->key;
+  ctf_link_type_key_t *key_b = (ctf_link_type_key_t *) hep_b->key;
 
-  return (key_a->cltm_fp == key_b->cltm_fp)
-    && (key_a->cltm_idx == key_b->cltm_idx);
+  return (key_a->cltk_fp == key_b->cltk_fp)
+    && (key_a->cltk_idx == key_b->cltk_idx);
 }
 
+/* Hash a type_id_key.  */
+unsigned int
+ctf_hash_type_id_key (const void *ptr)
+{
+  ctf_helem_t *hep = (ctf_helem_t *) ptr;
+  ctf_type_id_key_t *k = (ctf_type_id_key_t *) hep->key;
 
-/* Hash and eq functions for the dynset.  Most of these can just use the
-   underlying hashtab functions directly.  */
+  return htab_hash_pointer ((void *) (uintptr_t) k->ctii_input_num)
+    + 59 * htab_hash_pointer ((void *) (uintptr_t) k->ctii_type);
+}
 
 int
-ctf_dynset_eq_string (const void *a, const void *b)
+ctf_hash_eq_type_id_key (const void *a, const void *b)
 {
-  return !strcmp((const char *) a, (const char *) b);
+  ctf_helem_t *hep_a = (ctf_helem_t *) a;
+  ctf_helem_t *hep_b = (ctf_helem_t *) b;
+  ctf_type_id_key_t *key_a = (ctf_type_id_key_t *) hep_a->key;
+  ctf_type_id_key_t *key_b = (ctf_type_id_key_t *) hep_b->key;
+
+  return (key_a->ctii_input_num == key_b->ctii_input_num)
+    && (key_a->ctii_type == key_b->ctii_type);
 }
 
 /* The dynhash, used for hashes whose size is not known at creation time.  */
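For illustration only, not part of the patch: the new ctf_hash_type_key / ctf_hash_eq_type_key pair lets a dynhash be keyed by a (dict, type index) pair. Below is a minimal sketch of such a table in use, assuming the ctf_dynhash_create and ctf_dynhash_insert signatures declared in ctf-impl.h; the helper name remember_type is hypothetical.

/* Hypothetical helper: record VALUE under the (FP, IDX) pair.  Assumes the
   table was created with
     ctf_dynhash_create (ctf_hash_type_key, ctf_hash_eq_type_key, free, NULL)
   so that the table owns, and eventually frees, each key.  */
static int
remember_type (ctf_dynhash_t *h, ctf_dict_t *fp, ctf_id_t idx, void *value)
{
  ctf_link_type_key_t *key;

  if ((key = calloc (1, sizeof (ctf_link_type_key_t))) == NULL)
    return ENOMEM;

  key->cltk_fp = fp;
  key->cltk_idx = idx;

  return ctf_dynhash_insert (h, key, value);	/* Zero on success.  */
}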
@@ -378,6 +392,170 @@ ctf_dynhash_iter_remove (ctf_dynhash_t *hp, ctf_hash_iter_remove_f fun,
   htab_traverse (hp->htab, ctf_hashtab_traverse_remove, &arg);
 }
 
+/* Traverse a dynhash in arbitrary order, in _next iterator form.
+
+   Mutating the dynhash while iterating is not supported (just as it isn't for
+   htab_traverse).
+
+   Note: unusually, this returns zero on success and a *positive* value on
+   error, because it does not take an fp, taking an error pointer would be
+   incredibly clunky, and nearly all error-handling ends up stuffing the result
+   of this into some sort of errno or ctf_errno, which is invariably
+   positive.  So doing this simplifies essentially all callers.  */
+int
+ctf_dynhash_next (ctf_dynhash_t *h, ctf_next_t **it, void **key, void **value)
+{
+  ctf_next_t *i = *it;
+  ctf_helem_t *slot;
+
+  if (!i)
+    {
+      size_t size = htab_size (h->htab);
+
+      /* If the table has too many entries to fit in an ssize_t, just give up.
+	 This might be spurious, but if any type-related hashtable has ever been
+	 nearly as large as that then something very odd is going on.  */
+
+      if (((ssize_t) size) < 0)
+	return EDOM;
+
+      if ((i = ctf_next_create ()) == NULL)
+	return ENOMEM;
+
+      i->u.ctn_hash_slot = h->htab->entries;
+      i->cu.ctn_h = h;
+      i->ctn_n = 0;
+      i->ctn_size = (ssize_t) size;
+      i->ctn_iter_fun = (void (*) (void)) ctf_dynhash_next;
+      *it = i;
+    }
+
+  if ((void (*) (void)) ctf_dynhash_next != i->ctn_iter_fun)
+    return ECTF_NEXT_WRONGFUN;
+
+  if (h != i->cu.ctn_h)
+    return ECTF_NEXT_WRONGFP;
+
+  if ((ssize_t) i->ctn_n == i->ctn_size)
+    goto hash_end;
+
+  while ((ssize_t) i->ctn_n < i->ctn_size
+	 && (*i->u.ctn_hash_slot == HTAB_EMPTY_ENTRY
+	     || *i->u.ctn_hash_slot == HTAB_DELETED_ENTRY))
+    {
+      i->u.ctn_hash_slot++;
+      i->ctn_n++;
+    }
+
+  if ((ssize_t) i->ctn_n == i->ctn_size)
+    goto hash_end;
+
+  slot = *i->u.ctn_hash_slot;
+
+  if (key)
+    *key = slot->key;
+  if (value)
+    *value = slot->value;
+
+  i->u.ctn_hash_slot++;
+  i->ctn_n++;
+
+  return 0;
+
+ hash_end:
+  ctf_next_destroy (i);
+  *it = NULL;
+  return ECTF_NEXT_END;
+}
+
+int
+ctf_dynhash_sort_by_name (const ctf_next_hkv_t *one, const ctf_next_hkv_t *two,
+			  void *unused _libctf_unused_)
+{
+  return strcmp ((char *) one->hkv_key, (char *) two->hkv_key);
+}
+
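For reference only, not part of the patch: the calling convention described in the comment on ctf_dynhash_next looks like this in use. It mirrors the accumulation loop inside ctf_dynhash_next_sorted below; h is assumed to be an existing ctf_dynhash_t *, and use () is a placeholder.

/* Illustrative caller: visit every (key, value) pair in H.  Both errors and
   normal termination are reported through the positive return value.  */
ctf_next_t *it = NULL;
void *k, *v;
int err;

while ((err = ctf_dynhash_next (h, &it, &k, &v)) == 0)
  use (k, v);

if (err != ECTF_NEXT_END)
  return err;				/* Propagate the positive error.  */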
+/* Traverse a sorted dynhash, in _next iterator form.
+
+   See ctf_dynhash_next for notes on error returns, etc.
+
+   Sort keys before iterating over them using the SORT_FUN and SORT_ARG.
+
+   If SORT_FUN is null, thunks to ctf_dynhash_next.  */
+int
+ctf_dynhash_next_sorted (ctf_dynhash_t *h, ctf_next_t **it, void **key,
+			 void **value, ctf_hash_sort_f sort_fun, void *sort_arg)
+{
+  ctf_next_t *i = *it;
+
+  if (sort_fun == NULL)
+    return ctf_dynhash_next (h, it, key, value);
+
+  if (!i)
+    {
+      size_t els = ctf_dynhash_elements (h);
+      ctf_next_t *accum_i = NULL;
+      void *key, *value;
+      int err;
+      ctf_next_hkv_t *walk;
+
+      if (((ssize_t) els) < 0)
+	return EDOM;
+
+      if ((i = ctf_next_create ()) == NULL)
+	return ENOMEM;
+
+      if ((i->u.ctn_sorted_hkv = calloc (els, sizeof (ctf_next_hkv_t))) == NULL)
+	{
+	  ctf_next_destroy (i);
+	  return ENOMEM;
+	}
+      walk = i->u.ctn_sorted_hkv;
+
+      i->cu.ctn_h = h;
+
+      while ((err = ctf_dynhash_next (h, &accum_i, &key, &value)) == 0)
+	{
+	  walk->hkv_key = key;
+	  walk->hkv_value = value;
+	  walk++;
+	}
+      if (err != ECTF_NEXT_END)
+	{
+	  ctf_next_destroy (i);
+	  return err;
+	}
+
+      if (sort_fun)
+	ctf_qsort_r (i->u.ctn_sorted_hkv, els, sizeof (ctf_next_hkv_t),
+		     (int (*) (const void *, const void *, void *)) sort_fun,
+		     sort_arg);
+      i->ctn_n = 0;
+      i->ctn_size = (ssize_t) els;
+      i->ctn_iter_fun = (void (*) (void)) ctf_dynhash_next_sorted;
+      *it = i;
+    }
+
+  if ((void (*) (void)) ctf_dynhash_next_sorted != i->ctn_iter_fun)
+    return ECTF_NEXT_WRONGFUN;
+
+  if (h != i->cu.ctn_h)
+    return ECTF_NEXT_WRONGFP;
+
+  if ((ssize_t) i->ctn_n == i->ctn_size)
+    {
+      ctf_next_destroy (i);
+      *it = NULL;
+      return ECTF_NEXT_END;
+    }
+
+  if (key)
+    *key = i->u.ctn_sorted_hkv[i->ctn_n].hkv_key;
+  if (value)
+    *value = i->u.ctn_sorted_hkv[i->ctn_n].hkv_value;
+  i->ctn_n++;
+  return 0;
+}
+
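Again for illustration only: a name-sorted walk using the ctf_dynhash_sort_by_name comparator added above. This assumes the hash keys are NUL-terminated strings, which is what that comparator requires; h is an existing ctf_dynhash_t *.

/* Illustrative caller: visit keys in strcmp () order.  */
ctf_next_t *it = NULL;
void *k, *v;
int err;

while ((err = ctf_dynhash_next_sorted (h, &it, &k, &v,
				       ctf_dynhash_sort_by_name, NULL)) == 0)
  puts ((const char *) k);

if (err != ECTF_NEXT_END)
  return err;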
 void
 ctf_dynhash_destroy (ctf_dynhash_t *hp)
 {
@@ -485,6 +663,12 @@ ctf_dynset_lookup (ctf_dynset_t *hp, const void *key)
   return NULL;
 }
 
+size_t
+ctf_dynset_elements (ctf_dynset_t *hp)
+{
+  return htab_elements ((struct htab *) hp);
+}
+
 /* TRUE/FALSE return.  */
 int
 ctf_dynset_exists (ctf_dynset_t *hp, const void *key, const void **orig_key)
@@ -515,6 +699,74 @@ ctf_dynset_lookup_any (ctf_dynset_t *hp)
   return NULL;
 }
 
+/* Traverse a dynset in arbitrary order, in _next iterator form.
+
+   Otherwise, just like ctf_dynhash_next.  */
+int
+ctf_dynset_next (ctf_dynset_t *hp, ctf_next_t **it, void **key)
+{
+  struct htab *htab = (struct htab *) hp;
+  ctf_next_t *i = *it;
+  void *slot;
+
+  if (!i)
+    {
+      size_t size = htab_size (htab);
+
+      /* If the table has too many entries to fit in an ssize_t, just give up.
+	 This might be spurious, but if any type-related hashtable has ever been
+	 nearly as large as that then something very odd is going on.  */
+
+      if (((ssize_t) size) < 0)
+	return EDOM;
+
+      if ((i = ctf_next_create ()) == NULL)
+	return ENOMEM;
+
+      i->u.ctn_hash_slot = htab->entries;
+      i->cu.ctn_s = hp;
+      i->ctn_n = 0;
+      i->ctn_size = (ssize_t) size;
+      i->ctn_iter_fun = (void (*) (void)) ctf_dynset_next;
+      *it = i;
+    }
+
+  if ((void (*) (void)) ctf_dynset_next != i->ctn_iter_fun)
+    return ECTF_NEXT_WRONGFUN;
+
+  if (hp != i->cu.ctn_s)
+    return ECTF_NEXT_WRONGFP;
+
+  if ((ssize_t) i->ctn_n == i->ctn_size)
+    goto set_end;
+
+  while ((ssize_t) i->ctn_n < i->ctn_size
+	 && (*i->u.ctn_hash_slot == HTAB_EMPTY_ENTRY
+	     || *i->u.ctn_hash_slot == HTAB_DELETED_ENTRY))
+    {
+      i->u.ctn_hash_slot++;
+      i->ctn_n++;
+    }
+
+  if ((ssize_t) i->ctn_n == i->ctn_size)
+    goto set_end;
+
+  slot = *i->u.ctn_hash_slot;
+
+  if (key)
+    *key = internal_to_key (slot);
+
+  i->u.ctn_hash_slot++;
+  i->ctn_n++;
+
+  return 0;
+
+ set_end:
+  ctf_next_destroy (i);
+  *it = NULL;
+  return ECTF_NEXT_END;
+}
+
 /* ctf_hash, used for fixed-size maps from const char * -> ctf_id_t without
    removal.  This is a straight cast of a hashtab.  */
@@ -533,7 +785,7 @@ ctf_hash_size (const ctf_hash_t *hp)
 }
 
 int
-ctf_hash_insert_type (ctf_hash_t *hp, ctf_file_t *fp, uint32_t type,
+ctf_hash_insert_type (ctf_hash_t *hp, ctf_dict_t *fp, uint32_t type,
 		      uint32_t name)
 {
   const char *str = ctf_strraw (fp, name);
@@ -563,7 +815,7 @@ ctf_hash_insert_type (ctf_hash_t *hp, ctf_file_t *fp, uint32_t type,
    this new official definition.  If the key is not present, then call
    ctf_hash_insert_type and hash it in.  */
 int
-ctf_hash_define_type (ctf_hash_t *hp, ctf_file_t *fp, uint32_t type,
+ctf_hash_define_type (ctf_hash_t *hp, ctf_dict_t *fp, uint32_t type,
 		      uint32_t name)
 {
   /* This matches the semantics of ctf_hash_insert_type in this
@@ -573,7 +825,7 @@ ctf_hash_define_type (ctf_hash_t *hp, ctf_file_t *fp, uint32_t type,
 }
 
 ctf_id_t
-ctf_hash_lookup_type (ctf_hash_t *hp, ctf_file_t *fp __attribute__ ((__unused__)),
+ctf_hash_lookup_type (ctf_hash_t *hp, ctf_dict_t *fp __attribute__ ((__unused__)),
 		      const char *key)
 {
   ctf_helem_t **slot;
@@ -581,7 +833,7 @@ ctf_hash_lookup_type (ctf_hash_t *hp, ctf_file_t *fp __attribute__ ((__unused__)
   slot = ctf_hashtab_lookup ((struct htab *) hp, key, NO_INSERT);
 
   if (slot)
-    return (ctf_id_t) ((*slot)->value);
+    return (ctf_id_t) (uintptr_t) ((*slot)->value);
 
   return 0;
 }
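One last illustrative note on the final hunk: ctf_hash stores each ctf_id_t directly in the hash's void * value slot, so the value must round-trip through uintptr_t rather than being treated as a real pointer. Only the lookup-side cast appears in the patch; the store-side cast sketched here is an assumption about the matching insert path.

/* Sketch of the round-trip the last hunk tightens up.  The value is never
   dereferenced; it is just an integer smuggled through a void *.  */
ctf_id_t id = 42;
void *packed = (void *) (uintptr_t) id;			/* Store side (assumed).  */
ctf_id_t unpacked = (ctf_id_t) (uintptr_t) packed;	/* Lookup side, as patched.  */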