libctf/ctf-create.c
1 /* CTF dict creation.
2 Copyright (C) 2019-2021 Free Software Foundation, Inc.
3
4 This file is part of libctf.
5
6 libctf is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
14 See the GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; see the file COPYING. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include <ctf-impl.h>
21 #include <sys/param.h>
22 #include <string.h>
23 #include <unistd.h>
24
25 #ifndef EOVERFLOW
26 #define EOVERFLOW ERANGE
27 #endif
28
29 #ifndef roundup
30 #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
31 #endif
32
33 /* The initial size of a dynamic type's vlen in members. Arbitrary: the bigger
34 this is, the less allocation needs to be done for small structure
35 initialization, and the more memory is wasted for small structures during CTF
36 construction. No effect on generated CTF or ctf_open()ed CTF. */
37 #define INITIAL_VLEN 16
38
39 /* Make sure the ptrtab has enough space for at least one more type.
40
41 We start with 4KiB of ptrtab, enough for a thousand types, then grow it 25%
42 at a time. */
43
44 static int
45 ctf_grow_ptrtab (ctf_dict_t *fp)
46 {
47 size_t new_ptrtab_len = fp->ctf_ptrtab_len;
48
49 /* We allocate one more ptrtab entry than we need, for the initial zero,
50 plus one because the caller will probably allocate a new type. */
51
52 if (fp->ctf_ptrtab == NULL)
53 new_ptrtab_len = 1024;
54 else if ((fp->ctf_typemax + 2) > fp->ctf_ptrtab_len)
55 new_ptrtab_len = fp->ctf_ptrtab_len * 1.25;
56
57 if (new_ptrtab_len != fp->ctf_ptrtab_len)
58 {
59 uint32_t *new_ptrtab;
60
61 if ((new_ptrtab = realloc (fp->ctf_ptrtab,
62 new_ptrtab_len * sizeof (uint32_t))) == NULL)
63 return (ctf_set_errno (fp, ENOMEM));
64
65 fp->ctf_ptrtab = new_ptrtab;
66 memset (fp->ctf_ptrtab + fp->ctf_ptrtab_len, 0,
67 (new_ptrtab_len - fp->ctf_ptrtab_len) * sizeof (uint32_t));
68 fp->ctf_ptrtab_len = new_ptrtab_len;
69 }
70 return 0;
71 }
72
73 /* Make sure a vlen has enough space: expand it otherwise. Unlike the ptrtab,
74 which grows quite slowly, the vlen grows in big jumps because it is quite
75 expensive to expand: the caller has to scan the old vlen for string refs
76 first and remove them, then re-add them afterwards. The initial size is
77 more or less arbitrary. */
78 static int
79 ctf_grow_vlen (ctf_dict_t *fp, ctf_dtdef_t *dtd, size_t vlen)
80 {
81 unsigned char *old = dtd->dtd_vlen;
82
83 if (dtd->dtd_vlen_alloc > vlen)
84 return 0;
85
86 if ((dtd->dtd_vlen = realloc (dtd->dtd_vlen,
87 dtd->dtd_vlen_alloc * 2)) == NULL)
88 {
89 dtd->dtd_vlen = old;
90 return (ctf_set_errno (fp, ENOMEM));
91 }
92 memset (dtd->dtd_vlen + dtd->dtd_vlen_alloc, 0, dtd->dtd_vlen_alloc);
93 dtd->dtd_vlen_alloc *= 2;
94 return 0;
95 }
96
97 /* To create an empty CTF dict, we just declare a zeroed header and call
98 ctf_bufopen() on it. If ctf_bufopen succeeds, we mark the new dict r/w and
99 initialize the dynamic members. We start assigning type IDs at 1 because
100 type ID 0 is used as a sentinel and a not-found indicator. */
101
102 ctf_dict_t *
103 ctf_create (int *errp)
104 {
105 static const ctf_header_t hdr = { .cth_preamble = { CTF_MAGIC, CTF_VERSION, 0 } };
106
107 ctf_dynhash_t *dthash;
108 ctf_dynhash_t *dvhash;
109 ctf_dynhash_t *structs = NULL, *unions = NULL, *enums = NULL, *names = NULL;
110 ctf_dynhash_t *objthash = NULL, *funchash = NULL;
111 ctf_sect_t cts;
112 ctf_dict_t *fp;
113
114 libctf_init_debug();
115 dthash = ctf_dynhash_create (ctf_hash_integer, ctf_hash_eq_integer,
116 NULL, NULL);
117 if (dthash == NULL)
118 {
119 ctf_set_open_errno (errp, EAGAIN);
120 goto err;
121 }
122
123 dvhash = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
124 NULL, NULL);
125 if (dvhash == NULL)
126 {
127 ctf_set_open_errno (errp, EAGAIN);
128 goto err_dt;
129 }
130
131 structs = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
132 NULL, NULL);
133 unions = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
134 NULL, NULL);
135 enums = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
136 NULL, NULL);
137 names = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
138 NULL, NULL);
139 objthash = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
140 free, NULL);
141 funchash = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
142 free, NULL);
143   if (!structs || !unions || !enums || !names || !objthash || !funchash)
144 {
145 ctf_set_open_errno (errp, EAGAIN);
146 goto err_dv;
147 }
148
149 cts.cts_name = _CTF_SECTION;
150 cts.cts_data = &hdr;
151 cts.cts_size = sizeof (hdr);
152 cts.cts_entsize = 1;
153
154 if ((fp = ctf_bufopen_internal (&cts, NULL, NULL, NULL, 1, errp)) == NULL)
155 goto err_dv;
156
157 fp->ctf_structs.ctn_writable = structs;
158 fp->ctf_unions.ctn_writable = unions;
159 fp->ctf_enums.ctn_writable = enums;
160 fp->ctf_names.ctn_writable = names;
161 fp->ctf_objthash = objthash;
162 fp->ctf_funchash = funchash;
163 fp->ctf_dthash = dthash;
164 fp->ctf_dvhash = dvhash;
165 fp->ctf_dtoldid = 0;
166 fp->ctf_snapshots = 1;
167 fp->ctf_snapshot_lu = 0;
168 fp->ctf_flags |= LCTF_DIRTY;
169
170 ctf_set_ctl_hashes (fp);
171 ctf_setmodel (fp, CTF_MODEL_NATIVE);
172 if (ctf_grow_ptrtab (fp) < 0)
173 {
174 ctf_set_open_errno (errp, ctf_errno (fp));
175 ctf_dict_close (fp);
176 return NULL;
177 }
178
179 return fp;
180
181 err_dv:
182 ctf_dynhash_destroy (structs);
183 ctf_dynhash_destroy (unions);
184 ctf_dynhash_destroy (enums);
185 ctf_dynhash_destroy (names);
186 ctf_dynhash_destroy (objthash);
187 ctf_dynhash_destroy (funchash);
188 ctf_dynhash_destroy (dvhash);
189 err_dt:
190 ctf_dynhash_destroy (dthash);
191 err:
192 return NULL;
193 }
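
/* An illustrative sketch of typical use of the function above: create an
   empty writable dict and check for failure.  Only public ctf-api.h entry
   points are assumed; the example_* helper name is hypothetical.  */

static ctf_dict_t *
example_create_dict (void)
{
  int err;
  ctf_dict_t *fp = ctf_create (&err);

  if (fp == NULL)
    return NULL;	/* ERR holds the reason, e.g. EAGAIN on allocation failure.  */

  /* The dict is now writable: types can be added with the ctf_add_* functions
     below, and the dict is eventually freed with ctf_dict_close ().  */
  return fp;
}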
194
195 /* Compatibility: just update the threshold for ctf_discard. */
196 int
197 ctf_update (ctf_dict_t *fp)
198 {
199 if (!(fp->ctf_flags & LCTF_RDWR))
200 return (ctf_set_errno (fp, ECTF_RDONLY));
201
202 fp->ctf_dtoldid = fp->ctf_typemax;
203 return 0;
204 }
205
206 ctf_names_t *
207 ctf_name_table (ctf_dict_t *fp, int kind)
208 {
209 switch (kind)
210 {
211 case CTF_K_STRUCT:
212 return &fp->ctf_structs;
213 case CTF_K_UNION:
214 return &fp->ctf_unions;
215 case CTF_K_ENUM:
216 return &fp->ctf_enums;
217 default:
218 return &fp->ctf_names;
219 }
220 }
221
222 int
223 ctf_dtd_insert (ctf_dict_t *fp, ctf_dtdef_t *dtd, int flag, int kind)
224 {
225 const char *name;
226 if (ctf_dynhash_insert (fp->ctf_dthash, (void *) (uintptr_t) dtd->dtd_type,
227 dtd) < 0)
228 {
229 ctf_set_errno (fp, ENOMEM);
230 return -1;
231 }
232
233 if (flag == CTF_ADD_ROOT && dtd->dtd_data.ctt_name
234 && (name = ctf_strraw (fp, dtd->dtd_data.ctt_name)) != NULL)
235 {
236 if (ctf_dynhash_insert (ctf_name_table (fp, kind)->ctn_writable,
237 (char *) name, (void *) (uintptr_t)
238 dtd->dtd_type) < 0)
239 {
240 ctf_dynhash_remove (fp->ctf_dthash, (void *) (uintptr_t)
241 dtd->dtd_type);
242 ctf_set_errno (fp, ENOMEM);
243 return -1;
244 }
245 }
246 ctf_list_append (&fp->ctf_dtdefs, dtd);
247 return 0;
248 }
249
250 void
251 ctf_dtd_delete (ctf_dict_t *fp, ctf_dtdef_t *dtd)
252 {
253 int kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
254 size_t vlen = LCTF_INFO_VLEN (fp, dtd->dtd_data.ctt_info);
255 int name_kind = kind;
256 const char *name;
257
258 ctf_dynhash_remove (fp->ctf_dthash, (void *) (uintptr_t) dtd->dtd_type);
259
260 switch (kind)
261 {
262 case CTF_K_STRUCT:
263 case CTF_K_UNION:
264 {
265 ctf_lmember_t *memb = (ctf_lmember_t *) dtd->dtd_vlen;
266 size_t i;
267
268 for (i = 0; i < vlen; i++)
269 ctf_str_remove_ref (fp, ctf_strraw (fp, memb[i].ctlm_name),
270 &memb[i].ctlm_name);
271 }
272 break;
273 case CTF_K_ENUM:
274 {
275 ctf_enum_t *en = (ctf_enum_t *) dtd->dtd_vlen;
276 size_t i;
277
278 for (i = 0; i < vlen; i++)
279 ctf_str_remove_ref (fp, ctf_strraw (fp, en[i].cte_name),
280 &en[i].cte_name);
281 }
282 break;
283 case CTF_K_FORWARD:
284 name_kind = dtd->dtd_data.ctt_type;
285 break;
286 }
287 free (dtd->dtd_vlen);
288 dtd->dtd_vlen_alloc = 0;
289
290 if (dtd->dtd_data.ctt_name
291 && (name = ctf_strraw (fp, dtd->dtd_data.ctt_name)) != NULL
292 && LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info))
293 {
294 ctf_dynhash_remove (ctf_name_table (fp, name_kind)->ctn_writable,
295 name);
296 ctf_str_remove_ref (fp, name, &dtd->dtd_data.ctt_name);
297 }
298
299 ctf_list_delete (&fp->ctf_dtdefs, dtd);
300 free (dtd);
301 }
302
303 ctf_dtdef_t *
304 ctf_dtd_lookup (const ctf_dict_t *fp, ctf_id_t type)
305 {
306 return (ctf_dtdef_t *)
307 ctf_dynhash_lookup (fp->ctf_dthash, (void *) (uintptr_t) type);
308 }
309
310 ctf_dtdef_t *
311 ctf_dynamic_type (const ctf_dict_t *fp, ctf_id_t id)
312 {
313 ctf_id_t idx;
314
315 if (!(fp->ctf_flags & LCTF_RDWR))
316 return NULL;
317
318 if ((fp->ctf_flags & LCTF_CHILD) && LCTF_TYPE_ISPARENT (fp, id))
319 fp = fp->ctf_parent;
320
321 idx = LCTF_TYPE_TO_INDEX(fp, id);
322
323 if ((unsigned long) idx <= fp->ctf_typemax)
324 return ctf_dtd_lookup (fp, id);
325 return NULL;
326 }
327
328 int
329 ctf_dvd_insert (ctf_dict_t *fp, ctf_dvdef_t *dvd)
330 {
331 if (ctf_dynhash_insert (fp->ctf_dvhash, dvd->dvd_name, dvd) < 0)
332 {
333 ctf_set_errno (fp, ENOMEM);
334 return -1;
335 }
336 ctf_list_append (&fp->ctf_dvdefs, dvd);
337 return 0;
338 }
339
340 void
341 ctf_dvd_delete (ctf_dict_t *fp, ctf_dvdef_t *dvd)
342 {
343 ctf_dynhash_remove (fp->ctf_dvhash, dvd->dvd_name);
344 free (dvd->dvd_name);
345
346 ctf_list_delete (&fp->ctf_dvdefs, dvd);
347 free (dvd);
348 }
349
350 ctf_dvdef_t *
351 ctf_dvd_lookup (const ctf_dict_t *fp, const char *name)
352 {
353 return (ctf_dvdef_t *) ctf_dynhash_lookup (fp->ctf_dvhash, name);
354 }
355
356 /* Discard all of the dynamic type definitions and variable definitions that
357 have been added to the dict since the last call to ctf_update(). We locate
358 such types by scanning the dtd list and deleting elements that have type IDs
359 greater than ctf_dtoldid, which is set by ctf_update(), above, and by
360 scanning the variable list and deleting elements that have update IDs equal
361 to the current value of the last-update snapshot count (indicating that they
362 were added after the most recent call to ctf_update()). */
363 int
364 ctf_discard (ctf_dict_t *fp)
365 {
366 ctf_snapshot_id_t last_update =
367 { fp->ctf_dtoldid,
368 fp->ctf_snapshot_lu + 1 };
369
370 /* Update required? */
371 if (!(fp->ctf_flags & LCTF_DIRTY))
372 return 0;
373
374 return (ctf_rollback (fp, last_update));
375 }
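
/* An illustrative sketch of the ctf_update()/ctf_discard() pairing: the call
   to ctf_update() records the threshold, so the later ctf_discard() removes
   only the type added after it.  The helper and type names are hypothetical;
   only public ctf-api.h entry points are assumed.  */

static int
example_update_then_discard (ctf_dict_t *fp)
{
  ctf_encoding_t enc = { CTF_INT_SIGNED, 0, 32 };

  if (ctf_add_integer (fp, CTF_ADD_ROOT, "int", &enc) == CTF_ERR)
    return -1;				/* Error is in ctf_errno (fp).  */

  if (ctf_update (fp) < 0)		/* Keep everything added so far.  */
    return -1;

  if (ctf_add_integer (fp, CTF_ADD_ROOT, "scratch_int", &enc) == CTF_ERR)
    return -1;

  return ctf_discard (fp);		/* Drops "scratch_int", keeps "int".  */
}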
376
377 ctf_snapshot_id_t
378 ctf_snapshot (ctf_dict_t *fp)
379 {
380 ctf_snapshot_id_t snapid;
381 snapid.dtd_id = fp->ctf_typemax;
382 snapid.snapshot_id = fp->ctf_snapshots++;
383 return snapid;
384 }
385
386 /* Like ctf_discard(), only discards everything after a particular ID. */
387 int
388 ctf_rollback (ctf_dict_t *fp, ctf_snapshot_id_t id)
389 {
390 ctf_dtdef_t *dtd, *ntd;
391 ctf_dvdef_t *dvd, *nvd;
392
393 if (!(fp->ctf_flags & LCTF_RDWR))
394 return (ctf_set_errno (fp, ECTF_RDONLY));
395
396 if (fp->ctf_snapshot_lu >= id.snapshot_id)
397 return (ctf_set_errno (fp, ECTF_OVERROLLBACK));
398
399 for (dtd = ctf_list_next (&fp->ctf_dtdefs); dtd != NULL; dtd = ntd)
400 {
401 int kind;
402 const char *name;
403
404 ntd = ctf_list_next (dtd);
405
406 if (LCTF_TYPE_TO_INDEX (fp, dtd->dtd_type) <= id.dtd_id)
407 continue;
408
409 kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
410 if (kind == CTF_K_FORWARD)
411 kind = dtd->dtd_data.ctt_type;
412
413 if (dtd->dtd_data.ctt_name
414 && (name = ctf_strraw (fp, dtd->dtd_data.ctt_name)) != NULL
415 && LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info))
416 {
417 ctf_dynhash_remove (ctf_name_table (fp, kind)->ctn_writable,
418 name);
419 ctf_str_remove_ref (fp, name, &dtd->dtd_data.ctt_name);
420 }
421
422 ctf_dynhash_remove (fp->ctf_dthash, (void *) (uintptr_t) dtd->dtd_type);
423 ctf_dtd_delete (fp, dtd);
424 }
425
426 for (dvd = ctf_list_next (&fp->ctf_dvdefs); dvd != NULL; dvd = nvd)
427 {
428 nvd = ctf_list_next (dvd);
429
430 if (dvd->dvd_snapshots <= id.snapshot_id)
431 continue;
432
433 ctf_dvd_delete (fp, dvd);
434 }
435
436 fp->ctf_typemax = id.dtd_id;
437 fp->ctf_snapshots = id.snapshot_id;
438
439 if (fp->ctf_snapshots == fp->ctf_snapshot_lu)
440 fp->ctf_flags &= ~LCTF_DIRTY;
441
442 return 0;
443 }
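
/* An illustrative sketch of speculative addition using snapshots: everything
   added after ctf_snapshot() can be undone with ctf_rollback().  The helper
   and type names are hypothetical.  */

static int
example_speculative_add (ctf_dict_t *fp)
{
  ctf_snapshot_id_t snap = ctf_snapshot (fp);

  if (ctf_add_struct (fp, CTF_ADD_ROOT, "speculative_struct") == CTF_ERR)
    return -1;				/* Error is in ctf_errno (fp).  */

  /* Decide the addition is not wanted after all: roll back to SNAP, deleting
     the struct again.  */
  return ctf_rollback (fp, snap);
}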
444
445 /* Note: vlen is the amount of space *allocated* for the vlen. It may well not
446 be the amount of space used (yet): the space used is declared in per-kind
447 fashion in the dtd_data's info word. */
448 static ctf_id_t
449 ctf_add_generic (ctf_dict_t *fp, uint32_t flag, const char *name, int kind,
450 size_t vlen, ctf_dtdef_t **rp)
451 {
452 ctf_dtdef_t *dtd;
453 ctf_id_t type;
454
455 if (flag != CTF_ADD_NONROOT && flag != CTF_ADD_ROOT)
456 return (ctf_set_errno (fp, EINVAL));
457
458 if (!(fp->ctf_flags & LCTF_RDWR))
459 return (ctf_set_errno (fp, ECTF_RDONLY));
460
461 if (LCTF_INDEX_TO_TYPE (fp, fp->ctf_typemax, 1) >= CTF_MAX_TYPE)
462 return (ctf_set_errno (fp, ECTF_FULL));
463
464 if (LCTF_INDEX_TO_TYPE (fp, fp->ctf_typemax, 1) == (CTF_MAX_PTYPE - 1))
465 return (ctf_set_errno (fp, ECTF_FULL));
466
467 /* Make sure ptrtab always grows to be big enough for all types. */
468 if (ctf_grow_ptrtab (fp) < 0)
469 return CTF_ERR; /* errno is set for us. */
470
471 if ((dtd = calloc (1, sizeof (ctf_dtdef_t))) == NULL)
472 return (ctf_set_errno (fp, EAGAIN));
473
474 dtd->dtd_vlen_alloc = vlen;
475 if (vlen > 0)
476 {
477 if ((dtd->dtd_vlen = calloc (1, vlen)) == NULL)
478 goto oom;
479 }
480 else
481 dtd->dtd_vlen = NULL;
482
483 type = ++fp->ctf_typemax;
484 type = LCTF_INDEX_TO_TYPE (fp, type, (fp->ctf_flags & LCTF_CHILD));
485
486 dtd->dtd_data.ctt_name = ctf_str_add_pending (fp, name,
487 &dtd->dtd_data.ctt_name);
488 dtd->dtd_type = type;
489
490 if (dtd->dtd_data.ctt_name == 0 && name != NULL && name[0] != '\0')
491 goto oom;
492
493 if (ctf_dtd_insert (fp, dtd, flag, kind) < 0)
494 goto err; /* errno is set for us. */
495
496 fp->ctf_flags |= LCTF_DIRTY;
497
498 *rp = dtd;
499 return type;
500
501 oom:
502 ctf_set_errno (fp, EAGAIN);
503 err:
504 free (dtd->dtd_vlen);
505 free (dtd);
506 return CTF_ERR;
507 }
508
509 /* When encoding integer sizes, we want to convert a byte count in the range
510    1-8 up to the next power of 2 (e.g. 3->4, 5->8, etc). The clp2() function
511 is a clever implementation from "Hacker's Delight" by Henry Warren, Jr. */
512 static size_t
513 clp2 (size_t x)
514 {
515 x--;
516
517 x |= (x >> 1);
518 x |= (x >> 2);
519 x |= (x >> 4);
520 x |= (x >> 8);
521 x |= (x >> 16);
522
523 return (x + 1);
524 }
525
526 ctf_id_t
527 ctf_add_encoded (ctf_dict_t *fp, uint32_t flag,
528 const char *name, const ctf_encoding_t *ep, uint32_t kind)
529 {
530 ctf_dtdef_t *dtd;
531 ctf_id_t type;
532 uint32_t encoding;
533
534 if (ep == NULL)
535 return (ctf_set_errno (fp, EINVAL));
536
537 if (name == NULL || name[0] == '\0')
538 return (ctf_set_errno (fp, ECTF_NONAME));
539
540 if (!ctf_assert (fp, kind == CTF_K_INTEGER || kind == CTF_K_FLOAT))
541 return -1; /* errno is set for us. */
542
543 if ((type = ctf_add_generic (fp, flag, name, kind, sizeof (uint32_t),
544 &dtd)) == CTF_ERR)
545 return CTF_ERR; /* errno is set for us. */
546
547 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, flag, 0);
548 dtd->dtd_data.ctt_size = clp2 (P2ROUNDUP (ep->cte_bits, CHAR_BIT)
549 / CHAR_BIT);
550 switch (kind)
551 {
552 case CTF_K_INTEGER:
553 encoding = CTF_INT_DATA (ep->cte_format, ep->cte_offset, ep->cte_bits);
554 break;
555 case CTF_K_FLOAT:
556 encoding = CTF_FP_DATA (ep->cte_format, ep->cte_offset, ep->cte_bits);
557 break;
558 }
559 memcpy (dtd->dtd_vlen, &encoding, sizeof (encoding));
560
561 return type;
562 }
563
564 ctf_id_t
565 ctf_add_reftype (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref, uint32_t kind)
566 {
567 ctf_dtdef_t *dtd;
568 ctf_id_t type;
569 ctf_dict_t *tmp = fp;
570 int child = fp->ctf_flags & LCTF_CHILD;
571
572 if (ref == CTF_ERR || ref > CTF_MAX_TYPE)
573 return (ctf_set_errno (fp, EINVAL));
574
575 if (ref != 0 && ctf_lookup_by_id (&tmp, ref) == NULL)
576 return CTF_ERR; /* errno is set for us. */
577
578 if ((type = ctf_add_generic (fp, flag, NULL, kind, 0, &dtd)) == CTF_ERR)
579 return CTF_ERR; /* errno is set for us. */
580
581 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, flag, 0);
582 dtd->dtd_data.ctt_type = (uint32_t) ref;
583
584 if (kind != CTF_K_POINTER)
585 return type;
586
587 /* If we are adding a pointer, update the ptrtab, pointing at this type from
588 the type it points to. Note that ctf_typemax is at this point one higher
589 than we want to check against, because it's just been incremented for the
590 addition of this type. The pptrtab is lazily-updated as needed, so is not
591 touched here. */
592
593 uint32_t type_idx = LCTF_TYPE_TO_INDEX (fp, type);
594 uint32_t ref_idx = LCTF_TYPE_TO_INDEX (fp, ref);
595
596 if (LCTF_TYPE_ISCHILD (fp, ref) == child
597 && ref_idx < fp->ctf_typemax)
598 fp->ctf_ptrtab[ref_idx] = type_idx;
599
600 return type;
601 }
602
603 ctf_id_t
604 ctf_add_slice (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref,
605 const ctf_encoding_t *ep)
606 {
607 ctf_dtdef_t *dtd;
608 ctf_slice_t slice;
609 ctf_id_t resolved_ref = ref;
610 ctf_id_t type;
611 int kind;
612 const ctf_type_t *tp;
613 ctf_dict_t *tmp = fp;
614
615 if (ep == NULL)
616 return (ctf_set_errno (fp, EINVAL));
617
618 if ((ep->cte_bits > 255) || (ep->cte_offset > 255))
619 return (ctf_set_errno (fp, ECTF_SLICEOVERFLOW));
620
621 if (ref == CTF_ERR || ref > CTF_MAX_TYPE)
622 return (ctf_set_errno (fp, EINVAL));
623
624 if (ref != 0 && ((tp = ctf_lookup_by_id (&tmp, ref)) == NULL))
625 return CTF_ERR; /* errno is set for us. */
626
627 /* Make sure we ultimately point to an integral type. We also allow slices to
628 point to the unimplemented type, for now, because the compiler can emit
629 such slices, though they're not very much use. */
630
631 resolved_ref = ctf_type_resolve_unsliced (tmp, ref);
632 kind = ctf_type_kind_unsliced (tmp, resolved_ref);
633
634 if ((kind != CTF_K_INTEGER) && (kind != CTF_K_FLOAT) &&
635 (kind != CTF_K_ENUM)
636 && (ref != 0))
637 return (ctf_set_errno (fp, ECTF_NOTINTFP));
638
639 if ((type = ctf_add_generic (fp, flag, NULL, CTF_K_SLICE,
640 sizeof (ctf_slice_t), &dtd)) == CTF_ERR)
641 return CTF_ERR; /* errno is set for us. */
642
643 memset (&slice, 0, sizeof (ctf_slice_t));
644
645 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_SLICE, flag, 0);
646 dtd->dtd_data.ctt_size = clp2 (P2ROUNDUP (ep->cte_bits, CHAR_BIT)
647 / CHAR_BIT);
648 slice.cts_type = (uint32_t) ref;
649 slice.cts_bits = ep->cte_bits;
650 slice.cts_offset = ep->cte_offset;
651 memcpy (dtd->dtd_vlen, &slice, sizeof (ctf_slice_t));
652
653 return type;
654 }
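
/* An illustrative sketch: a slice narrows an integral type to a given bit
   width and offset, which is how bit-fields are commonly represented.  The
   helper and type names are hypothetical.  */

static ctf_id_t
example_add_bitfield_type (ctf_dict_t *fp)
{
  ctf_encoding_t int_enc = { CTF_INT_SIGNED, 0, 32 };
  ctf_encoding_t bits_enc = { CTF_INT_SIGNED, 3, 2 };	/* 2 bits at offset 3.  */
  ctf_id_t int_type;

  if ((int_type = ctf_add_integer (fp, CTF_ADD_ROOT, "int", &int_enc)) == CTF_ERR)
    return CTF_ERR;

  /* Slices are unnamed; here the slice is added non-root-visible.  */
  return ctf_add_slice (fp, CTF_ADD_NONROOT, int_type, &bits_enc);
}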
655
656 ctf_id_t
657 ctf_add_integer (ctf_dict_t *fp, uint32_t flag,
658 const char *name, const ctf_encoding_t *ep)
659 {
660 return (ctf_add_encoded (fp, flag, name, ep, CTF_K_INTEGER));
661 }
662
663 ctf_id_t
664 ctf_add_float (ctf_dict_t *fp, uint32_t flag,
665 const char *name, const ctf_encoding_t *ep)
666 {
667 return (ctf_add_encoded (fp, flag, name, ep, CTF_K_FLOAT));
668 }
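
/* An illustrative sketch of adding base types with these wrappers.  The
   encoding supplies format flags, bit offset and bit width; the recorded byte
   size is derived from cte_bits via clp2() above.  Names are hypothetical.  */

static int
example_add_base_types (ctf_dict_t *fp)
{
  ctf_encoding_t int_enc = { CTF_INT_SIGNED, 0, 32 };
  ctf_encoding_t char_enc = { CTF_INT_SIGNED | CTF_INT_CHAR, 0, 8 };
  ctf_encoding_t dbl_enc = { CTF_FP_DOUBLE, 0, 64 };

  if (ctf_add_integer (fp, CTF_ADD_ROOT, "int", &int_enc) == CTF_ERR
      || ctf_add_integer (fp, CTF_ADD_ROOT, "char", &char_enc) == CTF_ERR
      || ctf_add_float (fp, CTF_ADD_ROOT, "double", &dbl_enc) == CTF_ERR)
    return -1;				/* Error is in ctf_errno (fp).  */

  return 0;
}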
669
670 ctf_id_t
671 ctf_add_pointer (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
672 {
673 return (ctf_add_reftype (fp, flag, ref, CTF_K_POINTER));
674 }
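
/* An illustrative sketch: reference kinds simply point at another type, and
   adding a pointer also populates the ptrtab so that ctf_type_pointer() can
   later map "int" back to "int *".  Names are hypothetical.  */

static ctf_id_t
example_add_pointer_to_int (ctf_dict_t *fp)
{
  ctf_encoding_t enc = { CTF_INT_SIGNED, 0, 32 };
  ctf_id_t int_type;

  if ((int_type = ctf_add_integer (fp, CTF_ADD_ROOT, "int", &enc)) == CTF_ERR)
    return CTF_ERR;

  return ctf_add_pointer (fp, CTF_ADD_ROOT, int_type);	/* "int *".  */
}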
675
676 ctf_id_t
677 ctf_add_array (ctf_dict_t *fp, uint32_t flag, const ctf_arinfo_t *arp)
678 {
679 ctf_dtdef_t *dtd;
680 ctf_array_t cta;
681 ctf_id_t type;
682 ctf_dict_t *tmp = fp;
683
684 if (arp == NULL)
685 return (ctf_set_errno (fp, EINVAL));
686
687 if (arp->ctr_contents != 0
688 && ctf_lookup_by_id (&tmp, arp->ctr_contents) == NULL)
689 return CTF_ERR; /* errno is set for us. */
690
691 tmp = fp;
692 if (ctf_lookup_by_id (&tmp, arp->ctr_index) == NULL)
693 return CTF_ERR; /* errno is set for us. */
694
695 if (ctf_type_kind (fp, arp->ctr_index) == CTF_K_FORWARD)
696 {
697 ctf_err_warn (fp, 1, ECTF_INCOMPLETE,
698 _("ctf_add_array: index type %lx is incomplete"),
699 			arp->ctr_index);
700 return (ctf_set_errno (fp, ECTF_INCOMPLETE));
701 }
702
703 if ((type = ctf_add_generic (fp, flag, NULL, CTF_K_ARRAY,
704 sizeof (ctf_array_t), &dtd)) == CTF_ERR)
705 return CTF_ERR; /* errno is set for us. */
706
707 memset (&cta, 0, sizeof (ctf_array_t));
708
709 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_ARRAY, flag, 0);
710 dtd->dtd_data.ctt_size = 0;
711 cta.cta_contents = (uint32_t) arp->ctr_contents;
712 cta.cta_index = (uint32_t) arp->ctr_index;
713 cta.cta_nelems = arp->ctr_nelems;
714 memcpy (dtd->dtd_vlen, &cta, sizeof (ctf_array_t));
715
716 return type;
717 }
718
719 int
720 ctf_set_array (ctf_dict_t *fp, ctf_id_t type, const ctf_arinfo_t *arp)
721 {
722 ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, type);
723 ctf_array_t *vlen;
724
725 if (!(fp->ctf_flags & LCTF_RDWR))
726 return (ctf_set_errno (fp, ECTF_RDONLY));
727
728 if (dtd == NULL
729 || LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info) != CTF_K_ARRAY)
730 return (ctf_set_errno (fp, ECTF_BADID));
731
732 vlen = (ctf_array_t *) dtd->dtd_vlen;
733 fp->ctf_flags |= LCTF_DIRTY;
734 vlen->cta_contents = (uint32_t) arp->ctr_contents;
735 vlen->cta_index = (uint32_t) arp->ctr_index;
736 vlen->cta_nelems = arp->ctr_nelems;
737
738 return 0;
739 }
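
/* An illustrative sketch: array info names the element type, the index type
   (which must not be a forward) and the element count; ctf_set_array() can
   rewrite it later.  The helper and its parameters are hypothetical.  */

static ctf_id_t
example_add_int_array (ctf_dict_t *fp, ctf_id_t int_type, uint32_t nelems)
{
  ctf_arinfo_t ar;
  ctf_id_t atype;

  ar.ctr_contents = int_type;		/* Element type.  */
  ar.ctr_index = int_type;		/* Index type.  */
  ar.ctr_nelems = nelems;

  if ((atype = ctf_add_array (fp, CTF_ADD_ROOT, &ar)) == CTF_ERR)
    return CTF_ERR;

  /* If the bounds later turn out to be wrong, they can be adjusted.  */
  ar.ctr_nelems = nelems * 2;
  if (ctf_set_array (fp, atype, &ar) < 0)
    return CTF_ERR;

  return atype;
}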
740
741 ctf_id_t
742 ctf_add_function (ctf_dict_t *fp, uint32_t flag,
743 const ctf_funcinfo_t *ctc, const ctf_id_t *argv)
744 {
745 ctf_dtdef_t *dtd;
746 ctf_id_t type;
747 uint32_t vlen;
748 uint32_t *vdat;
749 ctf_dict_t *tmp = fp;
750 size_t initial_vlen;
751 size_t i;
752
753 if (!(fp->ctf_flags & LCTF_RDWR))
754 return (ctf_set_errno (fp, ECTF_RDONLY));
755
756 if (ctc == NULL || (ctc->ctc_flags & ~CTF_FUNC_VARARG) != 0
757 || (ctc->ctc_argc != 0 && argv == NULL))
758 return (ctf_set_errno (fp, EINVAL));
759
760 vlen = ctc->ctc_argc;
761 if (ctc->ctc_flags & CTF_FUNC_VARARG)
762 vlen++; /* Add trailing zero to indicate varargs (see below). */
763
764 if (ctc->ctc_return != 0
765 && ctf_lookup_by_id (&tmp, ctc->ctc_return) == NULL)
766 return CTF_ERR; /* errno is set for us. */
767
768 if (vlen > CTF_MAX_VLEN)
769 return (ctf_set_errno (fp, EOVERFLOW));
770
771 /* One word extra allocated for padding for 4-byte alignment if need be.
772 Not reflected in vlen: we don't want to copy anything into it, and
773 it's in addition to (e.g.) the trailing 0 indicating varargs. */
774
775 initial_vlen = (sizeof (uint32_t) * (vlen + (vlen & 1)));
776 if ((type = ctf_add_generic (fp, flag, NULL, CTF_K_FUNCTION,
777 initial_vlen, &dtd)) == CTF_ERR)
778 return CTF_ERR; /* errno is set for us. */
779
780 vdat = (uint32_t *) dtd->dtd_vlen;
781
782 for (i = 0; i < ctc->ctc_argc; i++)
783 {
784 tmp = fp;
785 if (argv[i] != 0 && ctf_lookup_by_id (&tmp, argv[i]) == NULL)
786 return CTF_ERR; /* errno is set for us. */
787 vdat[i] = (uint32_t) argv[i];
788 }
789
790 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_FUNCTION, flag, vlen);
791 dtd->dtd_data.ctt_type = (uint32_t) ctc->ctc_return;
792
793 if (ctc->ctc_flags & CTF_FUNC_VARARG)
794 vdat[vlen - 1] = 0; /* Add trailing zero to indicate varargs. */
795
796 return type;
797 }
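
/* An illustrative sketch: a function type records a return type, flags and an
   argument list; CTF_FUNC_VARARG adds the trailing zero argument described
   above.  The helper and its parameters are hypothetical.  */

static ctf_id_t
example_add_printf_like_type (ctf_dict_t *fp, ctf_id_t int_type,
			      ctf_id_t charp_type)
{
  ctf_funcinfo_t ctc;
  ctf_id_t args[1] = { charp_type };	/* int (const char *, ...)  */

  ctc.ctc_return = int_type;
  ctc.ctc_argc = 1;
  ctc.ctc_flags = CTF_FUNC_VARARG;

  return ctf_add_function (fp, CTF_ADD_ROOT, &ctc, args);
}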
798
799 ctf_id_t
800 ctf_add_struct_sized (ctf_dict_t *fp, uint32_t flag, const char *name,
801 size_t size)
802 {
803 ctf_dtdef_t *dtd;
804 ctf_id_t type = 0;
805 size_t initial_vlen = sizeof (ctf_lmember_t) * INITIAL_VLEN;
806
807 /* Promote root-visible forwards to structs. */
808 if (name != NULL)
809 type = ctf_lookup_by_rawname (fp, CTF_K_STRUCT, name);
810
811 if (type != 0 && ctf_type_kind (fp, type) == CTF_K_FORWARD)
812 dtd = ctf_dtd_lookup (fp, type);
813 else if ((type = ctf_add_generic (fp, flag, name, CTF_K_STRUCT,
814 initial_vlen, &dtd)) == CTF_ERR)
815 return CTF_ERR; /* errno is set for us. */
816
817 /* Forwards won't have any vlen yet. */
818 if (dtd->dtd_vlen_alloc == 0)
819 {
820 if ((dtd->dtd_vlen = calloc (1, initial_vlen)) == NULL)
821 return (ctf_set_errno (fp, ENOMEM));
822 dtd->dtd_vlen_alloc = initial_vlen;
823 }
824
825 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_STRUCT, flag, 0);
826 dtd->dtd_data.ctt_size = CTF_LSIZE_SENT;
827 dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (size);
828 dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (size);
829
830 return type;
831 }
832
833 ctf_id_t
834 ctf_add_struct (ctf_dict_t *fp, uint32_t flag, const char *name)
835 {
836 return (ctf_add_struct_sized (fp, flag, name, 0));
837 }
838
839 ctf_id_t
840 ctf_add_union_sized (ctf_dict_t *fp, uint32_t flag, const char *name,
841 size_t size)
842 {
843 ctf_dtdef_t *dtd;
844 ctf_id_t type = 0;
845 size_t initial_vlen = sizeof (ctf_lmember_t) * INITIAL_VLEN;
846
847 /* Promote root-visible forwards to unions. */
848 if (name != NULL)
849 type = ctf_lookup_by_rawname (fp, CTF_K_UNION, name);
850
851 if (type != 0 && ctf_type_kind (fp, type) == CTF_K_FORWARD)
852 dtd = ctf_dtd_lookup (fp, type);
853 else if ((type = ctf_add_generic (fp, flag, name, CTF_K_UNION,
854 initial_vlen, &dtd)) == CTF_ERR)
855 return CTF_ERR; /* errno is set for us */
856
857 /* Forwards won't have any vlen yet. */
858 if (dtd->dtd_vlen_alloc == 0)
859 {
860 if ((dtd->dtd_vlen = calloc (1, initial_vlen)) == NULL)
861 return (ctf_set_errno (fp, ENOMEM));
862 dtd->dtd_vlen_alloc = initial_vlen;
863 }
864
865 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_UNION, flag, 0);
866 dtd->dtd_data.ctt_size = CTF_LSIZE_SENT;
867 dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (size);
868 dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (size);
869
870 return type;
871 }
872
873 ctf_id_t
874 ctf_add_union (ctf_dict_t *fp, uint32_t flag, const char *name)
875 {
876 return (ctf_add_union_sized (fp, flag, name, 0));
877 }
878
879 ctf_id_t
880 ctf_add_enum (ctf_dict_t *fp, uint32_t flag, const char *name)
881 {
882 ctf_dtdef_t *dtd;
883 ctf_id_t type = 0;
884 size_t initial_vlen = sizeof (ctf_enum_t) * INITIAL_VLEN;
885
886 /* Promote root-visible forwards to enums. */
887 if (name != NULL)
888 type = ctf_lookup_by_rawname (fp, CTF_K_ENUM, name);
889
890 if (type != 0 && ctf_type_kind (fp, type) == CTF_K_FORWARD)
891 dtd = ctf_dtd_lookup (fp, type);
892 else if ((type = ctf_add_generic (fp, flag, name, CTF_K_ENUM,
893 initial_vlen, &dtd)) == CTF_ERR)
894 return CTF_ERR; /* errno is set for us. */
895
896 /* Forwards won't have any vlen yet. */
897 if (dtd->dtd_vlen_alloc == 0)
898 {
899 if ((dtd->dtd_vlen = calloc (1, initial_vlen)) == NULL)
900 return (ctf_set_errno (fp, ENOMEM));
901 dtd->dtd_vlen_alloc = initial_vlen;
902 }
903
904 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_ENUM, flag, 0);
905 dtd->dtd_data.ctt_size = fp->ctf_dmodel->ctd_int;
906
907 return type;
908 }
909
910 ctf_id_t
911 ctf_add_enum_encoded (ctf_dict_t *fp, uint32_t flag, const char *name,
912 const ctf_encoding_t *ep)
913 {
914 ctf_id_t type = 0;
915
916 /* First, create the enum if need be, using most of the same machinery as
917 ctf_add_enum(), to ensure that we do not allow things past that are not
918 enums or forwards to them. (This includes other slices: you cannot slice a
919 slice, which would be a useless thing to do anyway.) */
920
921 if (name != NULL)
922 type = ctf_lookup_by_rawname (fp, CTF_K_ENUM, name);
923
924 if (type != 0)
925 {
926 if ((ctf_type_kind (fp, type) != CTF_K_FORWARD) &&
927 (ctf_type_kind_unsliced (fp, type) != CTF_K_ENUM))
928 return (ctf_set_errno (fp, ECTF_NOTINTFP));
929 }
930 else if ((type = ctf_add_enum (fp, flag, name)) == CTF_ERR)
931 return CTF_ERR; /* errno is set for us. */
932
933 /* Now attach a suitable slice to it. */
934
935 return ctf_add_slice (fp, flag, type, ep);
936 }
937
938 ctf_id_t
939 ctf_add_forward (ctf_dict_t *fp, uint32_t flag, const char *name,
940 uint32_t kind)
941 {
942 ctf_dtdef_t *dtd;
943 ctf_id_t type = 0;
944
945 if (!ctf_forwardable_kind (kind))
946 return (ctf_set_errno (fp, ECTF_NOTSUE));
947
948 if (name == NULL || name[0] == '\0')
949 return (ctf_set_errno (fp, ECTF_NONAME));
950
951 /* If the type is already defined or exists as a forward tag, just
952 return the ctf_id_t of the existing definition. */
953
954 type = ctf_lookup_by_rawname (fp, kind, name);
955
956 if (type)
957 return type;
958
959 if ((type = ctf_add_generic (fp, flag, name, kind, 0, &dtd)) == CTF_ERR)
960 return CTF_ERR; /* errno is set for us. */
961
962 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_FORWARD, flag, 0);
963 dtd->dtd_data.ctt_type = kind;
964
965 return type;
966 }
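
/* An illustrative sketch: a forward stands in for a struct, union or enum not
   yet defined, and a later root-visible definition with the same name reuses
   its type ID (see the promotion in ctf_add_struct_sized above).  Names are
   hypothetical.  */

static ctf_id_t
example_add_list_node (ctf_dict_t *fp)
{
  ctf_id_t fwd, node, nodep;

  if ((fwd = ctf_add_forward (fp, CTF_ADD_ROOT, "node", CTF_K_STRUCT)) == CTF_ERR)
    return CTF_ERR;

  if ((nodep = ctf_add_pointer (fp, CTF_ADD_ROOT, fwd)) == CTF_ERR)
    return CTF_ERR;

  /* Defining "struct node" promotes the forward: NODE == FWD.  */
  if ((node = ctf_add_struct (fp, CTF_ADD_ROOT, "node")) == CTF_ERR)
    return CTF_ERR;

  if (ctf_add_member (fp, node, "next", nodep) < 0)
    return CTF_ERR;

  return node;
}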
967
968 ctf_id_t
969 ctf_add_unknown (ctf_dict_t *fp, uint32_t flag, const char *name)
970 {
971 ctf_dtdef_t *dtd;
972 ctf_id_t type = 0;
973
974 /* If a type is already defined with this name, error (if not CTF_K_UNKNOWN)
975 or just return it. */
976
977 if (name != NULL && name[0] != '\0' && flag == CTF_ADD_ROOT
978 && (type = ctf_lookup_by_rawname (fp, CTF_K_UNKNOWN, name)))
979 {
980 if (ctf_type_kind (fp, type) == CTF_K_UNKNOWN)
981 return type;
982 else
983 {
984 ctf_err_warn (fp, 1, ECTF_CONFLICT,
985 _("ctf_add_unknown: cannot add unknown type "
986 "named %s: type of this name already defined"),
987 name ? name : _("(unnamed type)"));
988 return (ctf_set_errno (fp, ECTF_CONFLICT));
989 }
990 }
991
992 if ((type = ctf_add_generic (fp, flag, name, CTF_K_UNKNOWN, 0, &dtd)) == CTF_ERR)
993 return CTF_ERR; /* errno is set for us. */
994
995 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_UNKNOWN, flag, 0);
996 dtd->dtd_data.ctt_type = 0;
997
998 return type;
999 }
1000
1001 ctf_id_t
1002 ctf_add_typedef (ctf_dict_t *fp, uint32_t flag, const char *name,
1003 ctf_id_t ref)
1004 {
1005 ctf_dtdef_t *dtd;
1006 ctf_id_t type;
1007 ctf_dict_t *tmp = fp;
1008
1009 if (ref == CTF_ERR || ref > CTF_MAX_TYPE)
1010 return (ctf_set_errno (fp, EINVAL));
1011
1012 if (name == NULL || name[0] == '\0')
1013 return (ctf_set_errno (fp, ECTF_NONAME));
1014
1015 if (ref != 0 && ctf_lookup_by_id (&tmp, ref) == NULL)
1016 return CTF_ERR; /* errno is set for us. */
1017
1018 if ((type = ctf_add_generic (fp, flag, name, CTF_K_TYPEDEF, 0,
1019 &dtd)) == CTF_ERR)
1020 return CTF_ERR; /* errno is set for us. */
1021
1022 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_TYPEDEF, flag, 0);
1023 dtd->dtd_data.ctt_type = (uint32_t) ref;
1024
1025 return type;
1026 }
1027
1028 ctf_id_t
1029 ctf_add_volatile (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
1030 {
1031 return (ctf_add_reftype (fp, flag, ref, CTF_K_VOLATILE));
1032 }
1033
1034 ctf_id_t
1035 ctf_add_const (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
1036 {
1037 return (ctf_add_reftype (fp, flag, ref, CTF_K_CONST));
1038 }
1039
1040 ctf_id_t
1041 ctf_add_restrict (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
1042 {
1043 return (ctf_add_reftype (fp, flag, ref, CTF_K_RESTRICT));
1044 }
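
/* An illustrative sketch: typedefs and the const/volatile/restrict qualifiers
   are all plain references to another type and can be chained.  Assumes an
   LP64-style 64-bit "unsigned long"; names are hypothetical.  */

static ctf_id_t
example_add_const_size_t (ctf_dict_t *fp)
{
  ctf_encoding_t enc = { 0, 0, 64 };	/* Unsigned, 64 bits.  */
  ctf_id_t ulong_type, sizet_type;

  if ((ulong_type = ctf_add_integer (fp, CTF_ADD_ROOT, "unsigned long",
				     &enc)) == CTF_ERR)
    return CTF_ERR;

  if ((sizet_type = ctf_add_typedef (fp, CTF_ADD_ROOT, "size_t",
				     ulong_type)) == CTF_ERR)
    return CTF_ERR;

  return ctf_add_const (fp, CTF_ADD_ROOT, sizet_type);	/* "const size_t".  */
}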
1045
1046 int
1047 ctf_add_enumerator (ctf_dict_t *fp, ctf_id_t enid, const char *name,
1048 int value)
1049 {
1050 ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, enid);
1051 unsigned char *old_vlen;
1052 ctf_enum_t *en;
1053 size_t i;
1054
1055 uint32_t kind, vlen, root;
1056
1057 if (name == NULL)
1058 return (ctf_set_errno (fp, EINVAL));
1059
1060 if (!(fp->ctf_flags & LCTF_RDWR))
1061 return (ctf_set_errno (fp, ECTF_RDONLY));
1062
1063 if (dtd == NULL)
1064 return (ctf_set_errno (fp, ECTF_BADID));
1065
1066 kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
1067 root = LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info);
1068 vlen = LCTF_INFO_VLEN (fp, dtd->dtd_data.ctt_info);
1069
1070 if (kind != CTF_K_ENUM)
1071 return (ctf_set_errno (fp, ECTF_NOTENUM));
1072
1073 if (vlen == CTF_MAX_VLEN)
1074 return (ctf_set_errno (fp, ECTF_DTFULL));
1075
1076 old_vlen = dtd->dtd_vlen;
1077 if (ctf_grow_vlen (fp, dtd, sizeof (ctf_enum_t) * (vlen + 1)) < 0)
1078 return -1; /* errno is set for us. */
1079 en = (ctf_enum_t *) dtd->dtd_vlen;
1080
1081 if (dtd->dtd_vlen != old_vlen)
1082 {
1083 ptrdiff_t move = (signed char *) dtd->dtd_vlen - (signed char *) old_vlen;
1084
1085 /* Remove pending refs in the old vlen region and reapply them. */
1086
1087 for (i = 0; i < vlen; i++)
1088 ctf_str_move_pending (fp, &en[i].cte_name, move);
1089 }
1090
1091 for (i = 0; i < vlen; i++)
1092 if (strcmp (ctf_strptr (fp, en[i].cte_name), name) == 0)
1093 return (ctf_set_errno (fp, ECTF_DUPLICATE));
1094
1095 en[i].cte_name = ctf_str_add_pending (fp, name, &en[i].cte_name);
1096 en[i].cte_value = value;
1097
1098 if (en[i].cte_name == 0 && name != NULL && name[0] != '\0')
1099 return -1; /* errno is set for us. */
1100
1101 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, root, vlen + 1);
1102
1103 fp->ctf_flags |= LCTF_DIRTY;
1104
1105 return 0;
1106 }
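
/* An illustrative sketch: an enum is created empty and enumerators are then
   attached one at a time; a repeated enumerator name within one enum fails
   with ECTF_DUPLICATE.  Names are hypothetical.  */

static ctf_id_t
example_add_bool_enum (ctf_dict_t *fp)
{
  ctf_id_t en;

  if ((en = ctf_add_enum (fp, CTF_ADD_ROOT, "bool_e")) == CTF_ERR)
    return CTF_ERR;

  if (ctf_add_enumerator (fp, en, "B_FALSE", 0) < 0
      || ctf_add_enumerator (fp, en, "B_TRUE", 1) < 0)
    return CTF_ERR;			/* Error is in ctf_errno (fp).  */

  return en;
}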
1107
1108 int
1109 ctf_add_member_offset (ctf_dict_t *fp, ctf_id_t souid, const char *name,
1110 ctf_id_t type, unsigned long bit_offset)
1111 {
1112 ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, souid);
1113
1114 ssize_t msize, malign, ssize;
1115 uint32_t kind, vlen, root;
1116 size_t i;
1117 int is_incomplete = 0;
1118 unsigned char *old_vlen;
1119 ctf_lmember_t *memb;
1120
1121 if (!(fp->ctf_flags & LCTF_RDWR))
1122 return (ctf_set_errno (fp, ECTF_RDONLY));
1123
1124 if (dtd == NULL)
1125 return (ctf_set_errno (fp, ECTF_BADID));
1126
1127 if (name != NULL && name[0] == '\0')
1128 name = NULL;
1129
1130 kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
1131 root = LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info);
1132 vlen = LCTF_INFO_VLEN (fp, dtd->dtd_data.ctt_info);
1133
1134 if (kind != CTF_K_STRUCT && kind != CTF_K_UNION)
1135 return (ctf_set_errno (fp, ECTF_NOTSOU));
1136
1137 if (vlen == CTF_MAX_VLEN)
1138 return (ctf_set_errno (fp, ECTF_DTFULL));
1139
1140 old_vlen = dtd->dtd_vlen;
1141 if (ctf_grow_vlen (fp, dtd, sizeof (ctf_lmember_t) * (vlen + 1)) < 0)
1142 return -1; /* errno is set for us. */
1143 memb = (ctf_lmember_t *) dtd->dtd_vlen;
1144
1145 if (dtd->dtd_vlen != old_vlen)
1146 {
1147 ptrdiff_t move = (signed char *) dtd->dtd_vlen - (signed char *) old_vlen;
1148
1149 /* Remove pending refs in the old vlen region and reapply them. */
1150
1151 for (i = 0; i < vlen; i++)
1152 ctf_str_move_pending (fp, &memb[i].ctlm_name, move);
1153 }
1154
1155 if (name != NULL)
1156 {
1157 for (i = 0; i < vlen; i++)
1158 if (strcmp (ctf_strptr (fp, memb[i].ctlm_name), name) == 0)
1159 return (ctf_set_errno (fp, ECTF_DUPLICATE));
1160 }
1161
1162 if ((msize = ctf_type_size (fp, type)) < 0 ||
1163 (malign = ctf_type_align (fp, type)) < 0)
1164 {
1165 /* The unimplemented type, and any type that resolves to it, has no size
1166 and no alignment: it can correspond to any number of compiler-inserted
1167 types. We allow incomplete types through since they are routinely
1168 added to the ends of structures, and can even be added elsewhere in
1169 structures by the deduplicator. They are assumed to be zero-size with
1170 no alignment: this is often wrong, but problems can be avoided in this
1171 case by explicitly specifying the size of the structure via the _sized
1172 functions. The deduplicator always does this. */
1173
1174 msize = 0;
1175 malign = 0;
1176 if (ctf_errno (fp) == ECTF_NONREPRESENTABLE)
1177 ctf_set_errno (fp, 0);
1178 else if (ctf_errno (fp) == ECTF_INCOMPLETE)
1179 is_incomplete = 1;
1180 else
1181 return -1; /* errno is set for us. */
1182 }
1183
1184 memb[vlen].ctlm_name = ctf_str_add_pending (fp, name, &memb[vlen].ctlm_name);
1185 memb[vlen].ctlm_type = type;
1186 if (memb[vlen].ctlm_name == 0 && name != NULL && name[0] != '\0')
1187 return -1; /* errno is set for us. */
1188
1189 if (kind == CTF_K_STRUCT && vlen != 0)
1190 {
1191 if (bit_offset == (unsigned long) - 1)
1192 {
1193 /* Natural alignment. */
1194
1195 ctf_id_t ltype = ctf_type_resolve (fp, memb[vlen - 1].ctlm_type);
1196 size_t off = CTF_LMEM_OFFSET(&memb[vlen - 1]);
1197
1198 ctf_encoding_t linfo;
1199 ssize_t lsize;
1200
1201 /* Propagate any error from ctf_type_resolve. If the last member was
1202 of unimplemented type, this may be -ECTF_NONREPRESENTABLE: we
1203 cannot insert right after such a member without explicit offset
1204 specification, because its alignment and size is not known. */
1205 if (ltype == CTF_ERR)
1206 return -1; /* errno is set for us. */
1207
1208 if (is_incomplete)
1209 {
1210 ctf_err_warn (fp, 1, ECTF_INCOMPLETE,
1211 _("ctf_add_member_offset: cannot add member %s of "
1212 "incomplete type %lx to struct %lx without "
1213 "specifying explicit offset\n"),
1214 name ? name : _("(unnamed member)"), type, souid);
1215 return (ctf_set_errno (fp, ECTF_INCOMPLETE));
1216 }
1217
1218 if (ctf_type_encoding (fp, ltype, &linfo) == 0)
1219 off += linfo.cte_bits;
1220 else if ((lsize = ctf_type_size (fp, ltype)) > 0)
1221 off += lsize * CHAR_BIT;
1222 else if (lsize == -1 && ctf_errno (fp) == ECTF_INCOMPLETE)
1223 {
1224 const char *lname = ctf_strraw (fp, memb[vlen - 1].ctlm_name);
1225
1226 ctf_err_warn (fp, 1, ECTF_INCOMPLETE,
1227 _("ctf_add_member_offset: cannot add member %s of "
1228 "type %lx to struct %lx without specifying "
1229 "explicit offset after member %s of type %lx, "
1230 "which is an incomplete type\n"),
1231 name ? name : _("(unnamed member)"), type, souid,
1232 lname ? lname : _("(unnamed member)"), ltype);
1233 return -1; /* errno is set for us. */
1234 }
1235
1236 /* Round up the offset of the end of the last member to
1237 the next byte boundary, convert 'off' to bytes, and
1238 then round it up again to the next multiple of the
1239 alignment required by the new member. Finally,
1240 convert back to bits and store the result in
1241 dmd_offset. Technically we could do more efficient
1242 packing if the new member is a bit-field, but we're
1243 the "compiler" and ANSI says we can do as we choose. */
1244
1245 off = roundup (off, CHAR_BIT) / CHAR_BIT;
1246 off = roundup (off, MAX (malign, 1));
1247 memb[vlen].ctlm_offsethi = CTF_OFFSET_TO_LMEMHI (off * CHAR_BIT);
1248 memb[vlen].ctlm_offsetlo = CTF_OFFSET_TO_LMEMLO (off * CHAR_BIT);
1249 ssize = off + msize;
1250 }
1251 else
1252 {
1253 /* Specified offset in bits. */
1254
1255 memb[vlen].ctlm_offsethi = CTF_OFFSET_TO_LMEMHI (bit_offset);
1256 memb[vlen].ctlm_offsetlo = CTF_OFFSET_TO_LMEMLO (bit_offset);
1257 ssize = ctf_get_ctt_size (fp, &dtd->dtd_data, NULL, NULL);
1258 ssize = MAX (ssize, ((signed) bit_offset / CHAR_BIT) + msize);
1259 }
1260 }
1261 else
1262 {
1263 memb[vlen].ctlm_offsethi = 0;
1264 memb[vlen].ctlm_offsetlo = 0;
1265 ssize = ctf_get_ctt_size (fp, &dtd->dtd_data, NULL, NULL);
1266 ssize = MAX (ssize, msize);
1267 }
1268
1269 dtd->dtd_data.ctt_size = CTF_LSIZE_SENT;
1270 dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (ssize);
1271 dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (ssize);
1272 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, root, vlen + 1);
1273
1274 fp->ctf_flags |= LCTF_DIRTY;
1275 return 0;
1276 }
1277
1278 int
1279 ctf_add_member_encoded (ctf_dict_t *fp, ctf_id_t souid, const char *name,
1280 ctf_id_t type, unsigned long bit_offset,
1281 const ctf_encoding_t encoding)
1282 {
1283 ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, type);
1284 int kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
1285 int otype = type;
1286
1287 if ((kind != CTF_K_INTEGER) && (kind != CTF_K_FLOAT) && (kind != CTF_K_ENUM))
1288 return (ctf_set_errno (fp, ECTF_NOTINTFP));
1289
1290 if ((type = ctf_add_slice (fp, CTF_ADD_NONROOT, otype, &encoding)) == CTF_ERR)
1291 return -1; /* errno is set for us. */
1292
1293 return ctf_add_member_offset (fp, souid, name, type, bit_offset);
1294 }
1295
1296 int
1297 ctf_add_member (ctf_dict_t *fp, ctf_id_t souid, const char *name,
1298 ctf_id_t type)
1299 {
1300 return ctf_add_member_offset (fp, souid, name, type, (unsigned long) - 1);
1301 }
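
/* An illustrative sketch: members are normally laid out automatically (the
   (unsigned long) -1 offset that ctf_add_member() passes), while
   ctf_add_member_offset() pins a member to an explicit bit offset, as is
   needed for instance around members of incomplete type.  Names are
   hypothetical.  */

static ctf_id_t
example_add_point_struct (ctf_dict_t *fp, ctf_id_t int_type)
{
  ctf_id_t stype;

  if ((stype = ctf_add_struct (fp, CTF_ADD_ROOT, "point")) == CTF_ERR)
    return CTF_ERR;

  if (ctf_add_member (fp, stype, "x", int_type) < 0	/* Natural layout.  */
      || ctf_add_member (fp, stype, "y", int_type) < 0
      || ctf_add_member_offset (fp, stype, "flags", int_type, 64) < 0)
    return CTF_ERR;			/* "flags" pinned at bit offset 64.  */

  return stype;
}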
1302
1303 int
1304 ctf_add_variable (ctf_dict_t *fp, const char *name, ctf_id_t ref)
1305 {
1306 ctf_dvdef_t *dvd;
1307 ctf_dict_t *tmp = fp;
1308
1309 if (!(fp->ctf_flags & LCTF_RDWR))
1310 return (ctf_set_errno (fp, ECTF_RDONLY));
1311
1312 if (ctf_dvd_lookup (fp, name) != NULL)
1313 return (ctf_set_errno (fp, ECTF_DUPLICATE));
1314
1315 if (ctf_lookup_by_id (&tmp, ref) == NULL)
1316 return -1; /* errno is set for us. */
1317
1318 /* Make sure this type is representable. */
1319 if ((ctf_type_resolve (fp, ref) == CTF_ERR)
1320 && (ctf_errno (fp) == ECTF_NONREPRESENTABLE))
1321 return -1;
1322
1323 if ((dvd = malloc (sizeof (ctf_dvdef_t))) == NULL)
1324 return (ctf_set_errno (fp, EAGAIN));
1325
1326 if (name != NULL && (dvd->dvd_name = strdup (name)) == NULL)
1327 {
1328 free (dvd);
1329 return (ctf_set_errno (fp, EAGAIN));
1330 }
1331 dvd->dvd_type = ref;
1332 dvd->dvd_snapshots = fp->ctf_snapshots;
1333
1334 if (ctf_dvd_insert (fp, dvd) < 0)
1335 {
1336 free (dvd->dvd_name);
1337 free (dvd);
1338 return -1; /* errno is set for us. */
1339 }
1340
1341 fp->ctf_flags |= LCTF_DIRTY;
1342 return 0;
1343 }
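
/* An illustrative sketch: a variable is a simple name-to-type binding;
   re-adding an existing name fails with ECTF_DUPLICATE.  Names are
   hypothetical.  */

static int
example_add_counter_variable (ctf_dict_t *fp, ctf_id_t int_type)
{
  if (ctf_add_variable (fp, "counter", int_type) < 0)
    return -1;				/* Error is in ctf_errno (fp).  */
  return 0;
}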
1344
1345 int
1346 ctf_add_funcobjt_sym (ctf_dict_t *fp, int is_function, const char *name, ctf_id_t id)
1347 {
1348 ctf_dict_t *tmp = fp;
1349 char *dupname;
1350 ctf_dynhash_t *h = is_function ? fp->ctf_funchash : fp->ctf_objthash;
1351
1352 if (!(fp->ctf_flags & LCTF_RDWR))
1353 return (ctf_set_errno (fp, ECTF_RDONLY));
1354
1355 if (ctf_dynhash_lookup (fp->ctf_objthash, name) != NULL ||
1356 ctf_dynhash_lookup (fp->ctf_funchash, name) != NULL)
1357 return (ctf_set_errno (fp, ECTF_DUPLICATE));
1358
1359 if (ctf_lookup_by_id (&tmp, id) == NULL)
1360 return -1; /* errno is set for us. */
1361
1362 if (is_function && ctf_type_kind (fp, id) != CTF_K_FUNCTION)
1363 return (ctf_set_errno (fp, ECTF_NOTFUNC));
1364
1365 if ((dupname = strdup (name)) == NULL)
1366 return (ctf_set_errno (fp, ENOMEM));
1367
1368 if (ctf_dynhash_insert (h, dupname, (void *) (uintptr_t) id) < 0)
1369 {
1370 free (dupname);
1371 return (ctf_set_errno (fp, ENOMEM));
1372 }
1373 return 0;
1374 }
1375
1376 int
1377 ctf_add_objt_sym (ctf_dict_t *fp, const char *name, ctf_id_t id)
1378 {
1379 return (ctf_add_funcobjt_sym (fp, 0, name, id));
1380 }
1381
1382 int
1383 ctf_add_func_sym (ctf_dict_t *fp, const char *name, ctf_id_t id)
1384 {
1385 return (ctf_add_funcobjt_sym (fp, 1, name, id));
1386 }
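
/* An illustrative sketch: data and function symbols are bound to types by
   name, and the two namespaces are checked together, so a given name can be
   bound only once.  Names are hypothetical.  */

static int
example_bind_symbols (ctf_dict_t *fp, ctf_id_t int_type, ctf_id_t func_type)
{
  if (ctf_add_objt_sym (fp, "counter", int_type) < 0)
    return -1;				/* Error is in ctf_errno (fp).  */

  /* FUNC_TYPE must be of kind CTF_K_FUNCTION, or this fails with
     ECTF_NOTFUNC.  */
  return ctf_add_func_sym (fp, "main", func_type);
}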
1387
1388 typedef struct ctf_bundle
1389 {
1390 ctf_dict_t *ctb_dict; /* CTF dict handle. */
1391 ctf_id_t ctb_type; /* CTF type identifier. */
1392 ctf_dtdef_t *ctb_dtd; /* CTF dynamic type definition (if any). */
1393 } ctf_bundle_t;
1394
1395 static int
1396 enumcmp (const char *name, int value, void *arg)
1397 {
1398 ctf_bundle_t *ctb = arg;
1399 int bvalue;
1400
1401 if (ctf_enum_value (ctb->ctb_dict, ctb->ctb_type, name, &bvalue) < 0)
1402 {
1403 ctf_err_warn (ctb->ctb_dict, 0, 0,
1404 _("conflict due to enum %s iteration error"), name);
1405 return 1;
1406 }
1407 if (value != bvalue)
1408 {
1409 ctf_err_warn (ctb->ctb_dict, 1, ECTF_CONFLICT,
1410 _("conflict due to enum value change: %i versus %i"),
1411 value, bvalue);
1412 return 1;
1413 }
1414 return 0;
1415 }
1416
1417 static int
1418 enumadd (const char *name, int value, void *arg)
1419 {
1420 ctf_bundle_t *ctb = arg;
1421
1422 return (ctf_add_enumerator (ctb->ctb_dict, ctb->ctb_type,
1423 name, value) < 0);
1424 }
1425
1426 static int
1427 membcmp (const char *name, ctf_id_t type _libctf_unused_, unsigned long offset,
1428 void *arg)
1429 {
1430 ctf_bundle_t *ctb = arg;
1431 ctf_membinfo_t ctm;
1432
1433 /* Don't check nameless members (e.g. anonymous structs/unions) against each
1434 other. */
1435 if (name[0] == 0)
1436 return 0;
1437
1438 if (ctf_member_info (ctb->ctb_dict, ctb->ctb_type, name, &ctm) < 0)
1439 {
1440 ctf_err_warn (ctb->ctb_dict, 0, 0,
1441 _("conflict due to struct member %s iteration error"),
1442 name);
1443 return 1;
1444 }
1445 if (ctm.ctm_offset != offset)
1446 {
1447 ctf_err_warn (ctb->ctb_dict, 1, ECTF_CONFLICT,
1448 _("conflict due to struct member %s offset change: "
1449 "%lx versus %lx"),
1450 name, ctm.ctm_offset, offset);
1451 return 1;
1452 }
1453 return 0;
1454 }
1455
1456 /* Record the correspondence between a source and ctf_add_type()-added
1457 destination type: both types are translated into parent type IDs if need be,
1458 so they relate to the actual dictionary they are in. Outside controlled
1459 circumstances (like linking) it is probably not useful to do more than
1460 compare these pointers, since there is nothing stopping the user closing the
1461 source dict whenever they want to.
1462
1463 Our OOM handling here is just to not do anything, because this is called deep
1464 enough in the call stack that doing anything useful is painfully difficult:
1465 the worst consequence if we do OOM is a bit of type duplication anyway. */
1466
1467 static void
1468 ctf_add_type_mapping (ctf_dict_t *src_fp, ctf_id_t src_type,
1469 ctf_dict_t *dst_fp, ctf_id_t dst_type)
1470 {
1471 if (LCTF_TYPE_ISPARENT (src_fp, src_type) && src_fp->ctf_parent)
1472 src_fp = src_fp->ctf_parent;
1473
1474 src_type = LCTF_TYPE_TO_INDEX(src_fp, src_type);
1475
1476 if (LCTF_TYPE_ISPARENT (dst_fp, dst_type) && dst_fp->ctf_parent)
1477 dst_fp = dst_fp->ctf_parent;
1478
1479 dst_type = LCTF_TYPE_TO_INDEX(dst_fp, dst_type);
1480
1481 if (dst_fp->ctf_link_type_mapping == NULL)
1482 {
1483 ctf_hash_fun f = ctf_hash_type_key;
1484 ctf_hash_eq_fun e = ctf_hash_eq_type_key;
1485
1486 if ((dst_fp->ctf_link_type_mapping = ctf_dynhash_create (f, e, free,
1487 NULL)) == NULL)
1488 return;
1489 }
1490
1491 ctf_link_type_key_t *key;
1492 key = calloc (1, sizeof (struct ctf_link_type_key));
1493 if (!key)
1494 return;
1495
1496 key->cltk_fp = src_fp;
1497 key->cltk_idx = src_type;
1498
1499 /* No OOM checking needed, because if this doesn't work the worst we'll do is
1500 add a few more duplicate types (which will probably run out of memory
1501 anyway). */
1502 ctf_dynhash_insert (dst_fp->ctf_link_type_mapping, key,
1503 (void *) (uintptr_t) dst_type);
1504 }
1505
1506 /* Look up a type mapping: return 0 if none. The DST_FP is modified to point to
1507 the parent if need be. The ID returned is from the dst_fp's perspective. */
1508 static ctf_id_t
1509 ctf_type_mapping (ctf_dict_t *src_fp, ctf_id_t src_type, ctf_dict_t **dst_fp)
1510 {
1511 ctf_link_type_key_t key;
1512 ctf_dict_t *target_fp = *dst_fp;
1513 ctf_id_t dst_type = 0;
1514
1515 if (LCTF_TYPE_ISPARENT (src_fp, src_type) && src_fp->ctf_parent)
1516 src_fp = src_fp->ctf_parent;
1517
1518 src_type = LCTF_TYPE_TO_INDEX(src_fp, src_type);
1519 key.cltk_fp = src_fp;
1520 key.cltk_idx = src_type;
1521
1522 if (target_fp->ctf_link_type_mapping)
1523 dst_type = (uintptr_t) ctf_dynhash_lookup (target_fp->ctf_link_type_mapping,
1524 &key);
1525
1526 if (dst_type != 0)
1527 {
1528 dst_type = LCTF_INDEX_TO_TYPE (target_fp, dst_type,
1529 target_fp->ctf_parent != NULL);
1530 *dst_fp = target_fp;
1531 return dst_type;
1532 }
1533
1534 if (target_fp->ctf_parent)
1535 target_fp = target_fp->ctf_parent;
1536 else
1537 return 0;
1538
1539 if (target_fp->ctf_link_type_mapping)
1540 dst_type = (uintptr_t) ctf_dynhash_lookup (target_fp->ctf_link_type_mapping,
1541 &key);
1542
1543 if (dst_type)
1544 dst_type = LCTF_INDEX_TO_TYPE (target_fp, dst_type,
1545 target_fp->ctf_parent != NULL);
1546
1547 *dst_fp = target_fp;
1548 return dst_type;
1549 }
1550
1551 /* The ctf_add_type routine is used to copy a type from a source CTF dictionary
1552 to a dynamic destination dictionary. This routine operates recursively by
1553 following the source type's links and embedded member types. If the
1554 destination dict already contains a named type which has the same attributes,
1555 then we succeed and return this type but no changes occur. */
1556 static ctf_id_t
1557 ctf_add_type_internal (ctf_dict_t *dst_fp, ctf_dict_t *src_fp, ctf_id_t src_type,
1558 ctf_dict_t *proc_tracking_fp)
1559 {
1560 ctf_id_t dst_type = CTF_ERR;
1561 uint32_t dst_kind = CTF_K_UNKNOWN;
1562 ctf_dict_t *tmp_fp = dst_fp;
1563 ctf_id_t tmp;
1564
1565 const char *name;
1566 uint32_t kind, forward_kind, flag, vlen;
1567
1568 const ctf_type_t *src_tp, *dst_tp;
1569 ctf_bundle_t src, dst;
1570 ctf_encoding_t src_en, dst_en;
1571 ctf_arinfo_t src_ar, dst_ar;
1572
1573 ctf_funcinfo_t ctc;
1574
1575 ctf_id_t orig_src_type = src_type;
1576
1577 if (!(dst_fp->ctf_flags & LCTF_RDWR))
1578 return (ctf_set_errno (dst_fp, ECTF_RDONLY));
1579
1580 if ((src_tp = ctf_lookup_by_id (&src_fp, src_type)) == NULL)
1581 return (ctf_set_errno (dst_fp, ctf_errno (src_fp)));
1582
1583 if ((ctf_type_resolve (src_fp, src_type) == CTF_ERR)
1584 && (ctf_errno (src_fp) == ECTF_NONREPRESENTABLE))
1585 return (ctf_set_errno (dst_fp, ECTF_NONREPRESENTABLE));
1586
1587 name = ctf_strptr (src_fp, src_tp->ctt_name);
1588 kind = LCTF_INFO_KIND (src_fp, src_tp->ctt_info);
1589 flag = LCTF_INFO_ISROOT (src_fp, src_tp->ctt_info);
1590 vlen = LCTF_INFO_VLEN (src_fp, src_tp->ctt_info);
1591
1592 /* If this is a type we are currently in the middle of adding, hand it
1593 straight back. (This lets us handle self-referential structures without
1594 considering forwards and empty structures the same as their completed
1595 forms.) */
1596
1597 tmp = ctf_type_mapping (src_fp, src_type, &tmp_fp);
1598
1599 if (tmp != 0)
1600 {
1601 if (ctf_dynhash_lookup (proc_tracking_fp->ctf_add_processing,
1602 (void *) (uintptr_t) src_type))
1603 return tmp;
1604
1605 /* If this type has already been added from this dictionary, and is the
1606 same kind and (if a struct or union) has the same number of members,
1607 hand it straight back. */
1608
1609 if (ctf_type_kind_unsliced (tmp_fp, tmp) == (int) kind)
1610 {
1611 if (kind == CTF_K_STRUCT || kind == CTF_K_UNION
1612 || kind == CTF_K_ENUM)
1613 {
1614 if ((dst_tp = ctf_lookup_by_id (&tmp_fp, dst_type)) != NULL)
1615 if (vlen == LCTF_INFO_VLEN (tmp_fp, dst_tp->ctt_info))
1616 return tmp;
1617 }
1618 else
1619 return tmp;
1620 }
1621 }
1622
1623 forward_kind = kind;
1624 if (kind == CTF_K_FORWARD)
1625 forward_kind = src_tp->ctt_type;
1626
1627 /* If the source type has a name and is a root type (visible at the top-level
1628 scope), lookup the name in the destination dictionary and verify that it is
1629 of the same kind before we do anything else. */
1630
1631 if ((flag & CTF_ADD_ROOT) && name[0] != '\0'
1632 && (tmp = ctf_lookup_by_rawname (dst_fp, forward_kind, name)) != 0)
1633 {
1634 dst_type = tmp;
1635 dst_kind = ctf_type_kind_unsliced (dst_fp, dst_type);
1636 }
1637
1638 /* If an identically named dst_type exists, fail with ECTF_CONFLICT
1639 unless dst_type is a forward declaration and src_type is a struct,
1640 union, or enum (i.e. the definition of the previous forward decl).
1641
1642 We also allow addition in the opposite order (addition of a forward when a
1643 struct, union, or enum already exists), which is a NOP and returns the
1644 already-present struct, union, or enum. */
1645
1646 if (dst_type != CTF_ERR && dst_kind != kind)
1647 {
1648 if (kind == CTF_K_FORWARD
1649 && (dst_kind == CTF_K_ENUM || dst_kind == CTF_K_STRUCT
1650 || dst_kind == CTF_K_UNION))
1651 {
1652 ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);
1653 return dst_type;
1654 }
1655
1656 if (dst_kind != CTF_K_FORWARD
1657 || (kind != CTF_K_ENUM && kind != CTF_K_STRUCT
1658 && kind != CTF_K_UNION))
1659 {
1660 ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
1661 _("ctf_add_type: conflict for type %s: "
1662 "kinds differ, new: %i; old (ID %lx): %i"),
1663 name, kind, dst_type, dst_kind);
1664 return (ctf_set_errno (dst_fp, ECTF_CONFLICT));
1665 }
1666 }
1667
1668 /* We take special action for an integer, float, or slice since it is
1669 described not only by its name but also its encoding. For integers,
1670 bit-fields exploit this degeneracy. */
1671
1672 if (kind == CTF_K_INTEGER || kind == CTF_K_FLOAT || kind == CTF_K_SLICE)
1673 {
1674 if (ctf_type_encoding (src_fp, src_type, &src_en) != 0)
1675 return (ctf_set_errno (dst_fp, ctf_errno (src_fp)));
1676
1677 if (dst_type != CTF_ERR)
1678 {
1679 ctf_dict_t *fp = dst_fp;
1680
1681 if ((dst_tp = ctf_lookup_by_id (&fp, dst_type)) == NULL)
1682 return CTF_ERR;
1683
1684 if (ctf_type_encoding (dst_fp, dst_type, &dst_en) != 0)
1685 return CTF_ERR; /* errno set for us. */
1686
1687 if (LCTF_INFO_ISROOT (fp, dst_tp->ctt_info) & CTF_ADD_ROOT)
1688 {
1689 /* The type that we found in the hash is also root-visible. If
1690 the two types match then use the existing one; otherwise,
1691 declare a conflict. Note: slices are not certain to match
1692 even if there is no conflict: we must check the contained type
1693 too. */
1694
1695 if (memcmp (&src_en, &dst_en, sizeof (ctf_encoding_t)) == 0)
1696 {
1697 if (kind != CTF_K_SLICE)
1698 {
1699 ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);
1700 return dst_type;
1701 }
1702 }
1703 else
1704 {
1705 return (ctf_set_errno (dst_fp, ECTF_CONFLICT));
1706 }
1707 }
1708 else
1709 {
1710 /* We found a non-root-visible type in the hash. If its encoding
1711 is the same, we can reuse it, unless it is a slice. */
1712
1713 if (memcmp (&src_en, &dst_en, sizeof (ctf_encoding_t)) == 0)
1714 {
1715 if (kind != CTF_K_SLICE)
1716 {
1717 ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);
1718 return dst_type;
1719 }
1720 }
1721 }
1722 }
1723 }
1724
1725 src.ctb_dict = src_fp;
1726 src.ctb_type = src_type;
1727 src.ctb_dtd = NULL;
1728
1729 dst.ctb_dict = dst_fp;
1730 dst.ctb_type = dst_type;
1731 dst.ctb_dtd = NULL;
1732
1733 /* Now perform kind-specific processing. If dst_type is CTF_ERR, then we add
1734 a new type with the same properties as src_type to dst_fp. If dst_type is
1735 not CTF_ERR, then we verify that dst_type has the same attributes as
1736 src_type. We recurse for embedded references. Before we start, we note
1737 that we are processing this type, to prevent infinite recursion: we do not
1738 re-process any type that appears in this list. The list is emptied
1739 wholesale at the end of processing everything in this recursive stack. */
1740
1741 if (ctf_dynhash_insert (proc_tracking_fp->ctf_add_processing,
1742 (void *) (uintptr_t) src_type, (void *) 1) < 0)
1743 return ctf_set_errno (dst_fp, ENOMEM);
1744
1745 switch (kind)
1746 {
1747 case CTF_K_INTEGER:
1748 /* If we found a match we will have either returned it or declared a
1749 conflict. */
1750 dst_type = ctf_add_integer (dst_fp, flag, name, &src_en);
1751 break;
1752
1753 case CTF_K_FLOAT:
1754 /* If we found a match we will have either returned it or declared a
1755 conflict. */
1756 dst_type = ctf_add_float (dst_fp, flag, name, &src_en);
1757 break;
1758
1759 case CTF_K_SLICE:
1760 /* We have checked for conflicting encodings: now try to add the
1761 contained type. */
1762 src_type = ctf_type_reference (src_fp, src_type);
1763 src_type = ctf_add_type_internal (dst_fp, src_fp, src_type,
1764 proc_tracking_fp);
1765
1766 if (src_type == CTF_ERR)
1767 return CTF_ERR; /* errno is set for us. */
1768
1769 dst_type = ctf_add_slice (dst_fp, flag, src_type, &src_en);
1770 break;
1771
1772 case CTF_K_POINTER:
1773 case CTF_K_VOLATILE:
1774 case CTF_K_CONST:
1775 case CTF_K_RESTRICT:
1776 src_type = ctf_type_reference (src_fp, src_type);
1777 src_type = ctf_add_type_internal (dst_fp, src_fp, src_type,
1778 proc_tracking_fp);
1779
1780 if (src_type == CTF_ERR)
1781 return CTF_ERR; /* errno is set for us. */
1782
1783 dst_type = ctf_add_reftype (dst_fp, flag, src_type, kind);
1784 break;
1785
1786 case CTF_K_ARRAY:
1787 if (ctf_array_info (src_fp, src_type, &src_ar) != 0)
1788 return (ctf_set_errno (dst_fp, ctf_errno (src_fp)));
1789
1790 src_ar.ctr_contents =
1791 ctf_add_type_internal (dst_fp, src_fp, src_ar.ctr_contents,
1792 proc_tracking_fp);
1793 src_ar.ctr_index = ctf_add_type_internal (dst_fp, src_fp,
1794 src_ar.ctr_index,
1795 proc_tracking_fp);
1796 src_ar.ctr_nelems = src_ar.ctr_nelems;
1797
1798 if (src_ar.ctr_contents == CTF_ERR || src_ar.ctr_index == CTF_ERR)
1799 return CTF_ERR; /* errno is set for us. */
1800
1801 if (dst_type != CTF_ERR)
1802 {
1803 if (ctf_array_info (dst_fp, dst_type, &dst_ar) != 0)
1804 return CTF_ERR; /* errno is set for us. */
1805
1806 if (memcmp (&src_ar, &dst_ar, sizeof (ctf_arinfo_t)))
1807 {
1808 ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
1809 _("conflict for type %s against ID %lx: array info "
1810 "differs, old %lx/%lx/%x; new: %lx/%lx/%x"),
1811 name, dst_type, src_ar.ctr_contents,
1812 src_ar.ctr_index, src_ar.ctr_nelems,
1813 dst_ar.ctr_contents, dst_ar.ctr_index,
1814 dst_ar.ctr_nelems);
1815 return (ctf_set_errno (dst_fp, ECTF_CONFLICT));
1816 }
1817 }
1818 else
1819 dst_type = ctf_add_array (dst_fp, flag, &src_ar);
1820 break;
1821
1822 case CTF_K_FUNCTION:
1823 ctc.ctc_return = ctf_add_type_internal (dst_fp, src_fp,
1824 src_tp->ctt_type,
1825 proc_tracking_fp);
1826 ctc.ctc_argc = 0;
1827 ctc.ctc_flags = 0;
1828
1829 if (ctc.ctc_return == CTF_ERR)
1830 return CTF_ERR; /* errno is set for us. */
1831
1832 dst_type = ctf_add_function (dst_fp, flag, &ctc, NULL);
1833 break;
1834
1835 case CTF_K_STRUCT:
1836 case CTF_K_UNION:
1837 {
1838 ctf_next_t *i = NULL;
1839 ssize_t offset;
1840 const char *membname;
1841 ctf_id_t src_membtype;
1842
1843 /* Technically to match a struct or union we need to check both
1844 ways (src members vs. dst, dst members vs. src) but we make
1845 this more optimal by only checking src vs. dst and comparing
1846 the total size of the structure (which we must do anyway)
1847 which covers the possibility of dst members not in src.
1848 This optimization can be defeated for unions, but is so
1849 pathological as to render it irrelevant for our purposes. */
1850
1851 if (dst_type != CTF_ERR && kind != CTF_K_FORWARD
1852 && dst_kind != CTF_K_FORWARD)
1853 {
1854 if (ctf_type_size (src_fp, src_type) !=
1855 ctf_type_size (dst_fp, dst_type))
1856 {
1857 ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
1858 _("conflict for type %s against ID %lx: union "
1859 "size differs, old %li, new %li"), name,
1860 dst_type, (long) ctf_type_size (src_fp, src_type),
1861 (long) ctf_type_size (dst_fp, dst_type));
1862 return (ctf_set_errno (dst_fp, ECTF_CONFLICT));
1863 }
1864
1865 if (ctf_member_iter (src_fp, src_type, membcmp, &dst))
1866 {
1867 ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
1868 _("conflict for type %s against ID %lx: members "
1869 "differ, see above"), name, dst_type);
1870 return (ctf_set_errno (dst_fp, ECTF_CONFLICT));
1871 }
1872
1873 break;
1874 }
1875
1876 dst_type = ctf_add_struct_sized (dst_fp, flag, name,
1877 ctf_type_size (src_fp, src_type));
1878 if (dst_type == CTF_ERR)
1879 return CTF_ERR; /* errno is set for us. */
1880
1881 /* Pre-emptively add this struct to the type mapping so that
1882 structures that refer to themselves work. */
1883 ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);
1884
1885 while ((offset = ctf_member_next (src_fp, src_type, &i, &membname,
1886 &src_membtype, 0)) >= 0)
1887 {
1888 ctf_dict_t *dst = dst_fp;
1889 ctf_id_t dst_membtype = ctf_type_mapping (src_fp, src_membtype, &dst);
1890
1891 if (dst_membtype == 0)
1892 {
1893 dst_membtype = ctf_add_type_internal (dst_fp, src_fp,
1894 src_membtype,
1895 proc_tracking_fp);
1896 if (dst_membtype == CTF_ERR)
1897 {
1898 if (ctf_errno (dst_fp) != ECTF_NONREPRESENTABLE)
1899 {
1900 ctf_next_destroy (i);
1901 break;
1902 }
1903 }
1904 }
1905
1906 if (ctf_add_member_offset (dst_fp, dst_type, membname,
1907 dst_membtype, offset) < 0)
1908 {
1909 ctf_next_destroy (i);
1910 break;
1911 }
1912 }
1913 if (ctf_errno (src_fp) != ECTF_NEXT_END)
1914 return CTF_ERR; /* errno is set for us. */
1915 break;
1916 }
1917
1918 case CTF_K_ENUM:
1919 if (dst_type != CTF_ERR && kind != CTF_K_FORWARD
1920 && dst_kind != CTF_K_FORWARD)
1921 {
1922 if (ctf_enum_iter (src_fp, src_type, enumcmp, &dst)
1923 || ctf_enum_iter (dst_fp, dst_type, enumcmp, &src))
1924 {
1925 ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
1926 _("conflict for enum %s against ID %lx: members "
1927 "differ, see above"), name, dst_type);
1928 return (ctf_set_errno (dst_fp, ECTF_CONFLICT));
1929 }
1930 }
1931 else
1932 {
1933 dst_type = ctf_add_enum (dst_fp, flag, name);
1934 if ((dst.ctb_type = dst_type) == CTF_ERR
1935 || ctf_enum_iter (src_fp, src_type, enumadd, &dst))
1936 return CTF_ERR; /* errno is set for us */
1937 }
1938 break;
1939
1940 case CTF_K_FORWARD:
1941 if (dst_type == CTF_ERR)
1942 dst_type = ctf_add_forward (dst_fp, flag, name, forward_kind);
1943 break;
1944
1945 case CTF_K_TYPEDEF:
1946 src_type = ctf_type_reference (src_fp, src_type);
1947 src_type = ctf_add_type_internal (dst_fp, src_fp, src_type,
1948 proc_tracking_fp);
1949
1950 if (src_type == CTF_ERR)
1951 return CTF_ERR; /* errno is set for us. */
1952
1953 /* If dst_type is not CTF_ERR at this point, we should check if
1954 ctf_type_reference(dst_fp, dst_type) != src_type and if so fail with
1955 ECTF_CONFLICT. However, this causes problems with bitness typedefs
1956 that vary based on things like if 32-bit then pid_t is int otherwise
1957 long. We therefore omit this check and assume that if the identically
1958 named typedef already exists in dst_fp, it is correct or
1959 equivalent. */
1960
1961 if (dst_type == CTF_ERR)
1962 dst_type = ctf_add_typedef (dst_fp, flag, name, src_type);
1963
1964 break;
1965
1966 default:
1967 return (ctf_set_errno (dst_fp, ECTF_CORRUPT));
1968 }
1969
1970 if (dst_type != CTF_ERR)
1971 ctf_add_type_mapping (src_fp, orig_src_type, dst_fp, dst_type);
1972 return dst_type;
1973 }
1974
1975 ctf_id_t
1976 ctf_add_type (ctf_dict_t *dst_fp, ctf_dict_t *src_fp, ctf_id_t src_type)
1977 {
1978 ctf_id_t id;
1979
1980 if (!src_fp->ctf_add_processing)
1981 src_fp->ctf_add_processing = ctf_dynhash_create (ctf_hash_integer,
1982 ctf_hash_eq_integer,
1983 NULL, NULL);
1984
1985 /* We store the hash on the source, because it contains only source type IDs:
1986 but callers will invariably expect errors to appear on the dest. */
1987 if (!src_fp->ctf_add_processing)
1988 return (ctf_set_errno (dst_fp, ENOMEM));
1989
1990 id = ctf_add_type_internal (dst_fp, src_fp, src_type, src_fp);
1991 ctf_dynhash_empty (src_fp->ctf_add_processing);
1992
1993 return id;
1994 }
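
/* An illustrative sketch: copy a named type, and recursively everything it
   refers to, from one dict into a writable one.  If a compatible type of the
   same name already exists in the destination its ID is returned unchanged;
   an incompatible one fails with ECTF_CONFLICT.  Names are hypothetical.  */

static ctf_id_t
example_copy_named_type (ctf_dict_t *dst_fp, ctf_dict_t *src_fp,
			 const char *name)
{
  ctf_id_t src_type = ctf_lookup_by_name (src_fp, name);

  if (src_type == CTF_ERR)
    return CTF_ERR;			/* Error is in ctf_errno (src_fp).  */

  return ctf_add_type (dst_fp, src_fp, src_type);
}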