92d9e16e359d337fcafee0d38ca1c703722a1ee9
[deliverable/binutils-gdb.git] / gdb / dwarf2expr.c
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001, 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
4
5 Contributed by Daniel Berlin (dan@dberlin.org)
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor,
22 Boston, MA 02110-1301, USA. */
23
24 #include "defs.h"
25 #include "symtab.h"
26 #include "gdbtypes.h"
27 #include "value.h"
28 #include "gdbcore.h"
29 #include "elf/dwarf2.h"
30 #include "dwarf2expr.h"
31
32 /* Local prototypes. */
33
34 static void execute_stack_op (struct dwarf_expr_context *,
35 gdb_byte *, gdb_byte *);
36 static struct type *unsigned_address_type (void);
37
38 /* Create a new context for the expression evaluator. */
39
40 struct dwarf_expr_context *
41 new_dwarf_expr_context (void)
42 {
43 struct dwarf_expr_context *retval;
44 retval = xcalloc (1, sizeof (struct dwarf_expr_context));
45 retval->stack_len = 0;
46 retval->stack_allocated = 10;
47 retval->stack = xmalloc (retval->stack_allocated * sizeof (CORE_ADDR));
48 retval->num_pieces = 0;
49 retval->pieces = 0;
50 return retval;
51 }
52
53 /* Release the memory allocated to CTX. */
54
55 void
56 free_dwarf_expr_context (struct dwarf_expr_context *ctx)
57 {
58 xfree (ctx->stack);
59 xfree (ctx->pieces);
60 xfree (ctx);
61 }
62
63 /* Expand the memory allocated to CTX's stack to contain at least
64 NEED more elements than are currently used. */
65
66 static void
67 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
68 {
69 if (ctx->stack_len + need > ctx->stack_allocated)
70 {
71 size_t newlen = ctx->stack_len + need + 10;
72 ctx->stack = xrealloc (ctx->stack,
73 newlen * sizeof (CORE_ADDR));
74 ctx->stack_allocated = newlen;
75 }
76 }
77
78 /* Push VALUE onto CTX's stack. */
79
80 void
81 dwarf_expr_push (struct dwarf_expr_context *ctx, CORE_ADDR value)
82 {
83 dwarf_expr_grow_stack (ctx, 1);
84 ctx->stack[ctx->stack_len++] = value;
85 }
86
87 /* Pop the top item off of CTX's stack. */
88
89 void
90 dwarf_expr_pop (struct dwarf_expr_context *ctx)
91 {
92 if (ctx->stack_len <= 0)
93 error (_("dwarf expression stack underflow"));
94 ctx->stack_len--;
95 }
96
97 /* Retrieve the N'th item on CTX's stack. */
98
99 CORE_ADDR
100 dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
101 {
102 if (ctx->stack_len <= n)
103 error (_("Asked for position %d of stack, stack only has %d elements on it."),
104 n, ctx->stack_len);
105 return ctx->stack[ctx->stack_len - (1 + n)];
106
107 }
108
109 /* Add a new piece to CTX's piece list. */
110 static void
111 add_piece (struct dwarf_expr_context *ctx,
112 int in_reg, CORE_ADDR value, ULONGEST size)
113 {
114 struct dwarf_expr_piece *p;
115
116 ctx->num_pieces++;
117
118 if (ctx->pieces)
119 ctx->pieces = xrealloc (ctx->pieces,
120 (ctx->num_pieces
121 * sizeof (struct dwarf_expr_piece)));
122 else
123 ctx->pieces = xmalloc (ctx->num_pieces
124 * sizeof (struct dwarf_expr_piece));
125
126 p = &ctx->pieces[ctx->num_pieces - 1];
127 p->in_reg = in_reg;
128 p->value = value;
129 p->size = size;
130 }
131
/* Evaluate the DWARF expression at ADDR (LEN bytes long) using the
   context CTX.  On return the result is on top of CTX's stack;
   callers retrieve it with dwarf_expr_fetch.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, gdb_byte *addr, size_t len)
{
  execute_stack_op (ctx, addr, addr + len);
}
140
141 /* Decode the unsigned LEB128 constant at BUF into the variable pointed to
142 by R, and return the new value of BUF. Verify that it doesn't extend
143 past BUF_END. */
144
145 gdb_byte *
146 read_uleb128 (gdb_byte *buf, gdb_byte *buf_end, ULONGEST * r)
147 {
148 unsigned shift = 0;
149 ULONGEST result = 0;
150 gdb_byte byte;
151
152 while (1)
153 {
154 if (buf >= buf_end)
155 error (_("read_uleb128: Corrupted DWARF expression."));
156
157 byte = *buf++;
158 result |= (byte & 0x7f) << shift;
159 if ((byte & 0x80) == 0)
160 break;
161 shift += 7;
162 }
163 *r = result;
164 return buf;
165 }
166
167 /* Decode the signed LEB128 constant at BUF into the variable pointed to
168 by R, and return the new value of BUF. Verify that it doesn't extend
169 past BUF_END. */
170
171 gdb_byte *
172 read_sleb128 (gdb_byte *buf, gdb_byte *buf_end, LONGEST * r)
173 {
174 unsigned shift = 0;
175 LONGEST result = 0;
176 gdb_byte byte;
177
178 while (1)
179 {
180 if (buf >= buf_end)
181 error (_("read_sleb128: Corrupted DWARF expression."));
182
183 byte = *buf++;
184 result |= (byte & 0x7f) << shift;
185 shift += 7;
186 if ((byte & 0x80) == 0)
187 break;
188 }
189 if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
190 result |= -(1 << shift);
191
192 *r = result;
193 return buf;
194 }
195
196 /* Read an address from BUF, and verify that it doesn't extend past
197 BUF_END. The address is returned, and *BYTES_READ is set to the
198 number of bytes read from BUF. */
199
200 CORE_ADDR
201 dwarf2_read_address (gdb_byte *buf, gdb_byte *buf_end, int *bytes_read)
202 {
203 CORE_ADDR result;
204
205 if (buf_end - buf < TARGET_ADDR_BIT / TARGET_CHAR_BIT)
206 error (_("dwarf2_read_address: Corrupted DWARF expression."));
207
208 *bytes_read = TARGET_ADDR_BIT / TARGET_CHAR_BIT;
209
210 /* For most architectures, calling extract_unsigned_integer() alone
211 is sufficient for extracting an address. However, some
212 architectures (e.g. MIPS) use signed addresses and using
213 extract_unsigned_integer() will not produce a correct
214 result. Turning the unsigned integer into a value and then
215 decomposing that value as an address will cause
216 gdbarch_integer_to_address() to be invoked for those
217 architectures which require it. Thus, using value_as_address()
218 will produce the correct result for both types of architectures.
219
220 One concern regarding the use of values for this purpose is
221 efficiency. Obviously, these extra calls will take more time to
222 execute and creating a value takes more space, space which will
223 have to be garbage collected at a later time. If constructing
224 and then decomposing a value for this purpose proves to be too
225 inefficient, then gdbarch_integer_to_address() can be called
226 directly.
227
228 The use of `unsigned_address_type' in the code below refers to
229 the type of buf and has no bearing on the signedness of the
230 address being returned. */
231
232 result = value_as_address (value_from_longest
233 (unsigned_address_type (),
234 extract_unsigned_integer
235 (buf,
236 TARGET_ADDR_BIT / TARGET_CHAR_BIT)));
237
238 return result;
239 }
240
241 /* Return the type of an address, for unsigned arithmetic. */
242
243 static struct type *
244 unsigned_address_type (void)
245 {
246 switch (TARGET_ADDR_BIT / TARGET_CHAR_BIT)
247 {
248 case 2:
249 return builtin_type_uint16;
250 case 4:
251 return builtin_type_uint32;
252 case 8:
253 return builtin_type_uint64;
254 default:
255 internal_error (__FILE__, __LINE__,
256 _("Unsupported address size.\n"));
257 }
258 }
259
260 /* Return the type of an address, for signed arithmetic. */
261
262 static struct type *
263 signed_address_type (void)
264 {
265 switch (TARGET_ADDR_BIT / TARGET_CHAR_BIT)
266 {
267 case 2:
268 return builtin_type_int16;
269 case 4:
270 return builtin_type_int32;
271 case 8:
272 return builtin_type_int64;
273 default:
274 internal_error (__FILE__, __LINE__,
275 _("Unsupported address size.\n"));
276 }
277 }
278 \f
279 /* The engine for the expression evaluator. Using the context in CTX,
280 evaluate the expression between OP_PTR and OP_END. */
281
282 static void
283 execute_stack_op (struct dwarf_expr_context *ctx,
284 gdb_byte *op_ptr, gdb_byte *op_end)
285 {
286 ctx->in_reg = 0;
287 ctx->initialized = 1; /* Default is initialized. */
288
289 while (op_ptr < op_end)
290 {
291 enum dwarf_location_atom op = *op_ptr++;
292 CORE_ADDR result;
293 ULONGEST uoffset, reg;
294 LONGEST offset;
295 int bytes_read;
296
297 switch (op)
298 {
299 case DW_OP_lit0:
300 case DW_OP_lit1:
301 case DW_OP_lit2:
302 case DW_OP_lit3:
303 case DW_OP_lit4:
304 case DW_OP_lit5:
305 case DW_OP_lit6:
306 case DW_OP_lit7:
307 case DW_OP_lit8:
308 case DW_OP_lit9:
309 case DW_OP_lit10:
310 case DW_OP_lit11:
311 case DW_OP_lit12:
312 case DW_OP_lit13:
313 case DW_OP_lit14:
314 case DW_OP_lit15:
315 case DW_OP_lit16:
316 case DW_OP_lit17:
317 case DW_OP_lit18:
318 case DW_OP_lit19:
319 case DW_OP_lit20:
320 case DW_OP_lit21:
321 case DW_OP_lit22:
322 case DW_OP_lit23:
323 case DW_OP_lit24:
324 case DW_OP_lit25:
325 case DW_OP_lit26:
326 case DW_OP_lit27:
327 case DW_OP_lit28:
328 case DW_OP_lit29:
329 case DW_OP_lit30:
330 case DW_OP_lit31:
331 result = op - DW_OP_lit0;
332 break;
333
334 case DW_OP_addr:
335 result = dwarf2_read_address (op_ptr, op_end, &bytes_read);
336 op_ptr += bytes_read;
337 break;
338
339 case DW_OP_const1u:
340 result = extract_unsigned_integer (op_ptr, 1);
341 op_ptr += 1;
342 break;
343 case DW_OP_const1s:
344 result = extract_signed_integer (op_ptr, 1);
345 op_ptr += 1;
346 break;
347 case DW_OP_const2u:
348 result = extract_unsigned_integer (op_ptr, 2);
349 op_ptr += 2;
350 break;
351 case DW_OP_const2s:
352 result = extract_signed_integer (op_ptr, 2);
353 op_ptr += 2;
354 break;
355 case DW_OP_const4u:
356 result = extract_unsigned_integer (op_ptr, 4);
357 op_ptr += 4;
358 break;
359 case DW_OP_const4s:
360 result = extract_signed_integer (op_ptr, 4);
361 op_ptr += 4;
362 break;
363 case DW_OP_const8u:
364 result = extract_unsigned_integer (op_ptr, 8);
365 op_ptr += 8;
366 break;
367 case DW_OP_const8s:
368 result = extract_signed_integer (op_ptr, 8);
369 op_ptr += 8;
370 break;
371 case DW_OP_constu:
372 op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
373 result = uoffset;
374 break;
375 case DW_OP_consts:
376 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
377 result = offset;
378 break;
379
380 /* The DW_OP_reg operations are required to occur alone in
381 location expressions. */
382 case DW_OP_reg0:
383 case DW_OP_reg1:
384 case DW_OP_reg2:
385 case DW_OP_reg3:
386 case DW_OP_reg4:
387 case DW_OP_reg5:
388 case DW_OP_reg6:
389 case DW_OP_reg7:
390 case DW_OP_reg8:
391 case DW_OP_reg9:
392 case DW_OP_reg10:
393 case DW_OP_reg11:
394 case DW_OP_reg12:
395 case DW_OP_reg13:
396 case DW_OP_reg14:
397 case DW_OP_reg15:
398 case DW_OP_reg16:
399 case DW_OP_reg17:
400 case DW_OP_reg18:
401 case DW_OP_reg19:
402 case DW_OP_reg20:
403 case DW_OP_reg21:
404 case DW_OP_reg22:
405 case DW_OP_reg23:
406 case DW_OP_reg24:
407 case DW_OP_reg25:
408 case DW_OP_reg26:
409 case DW_OP_reg27:
410 case DW_OP_reg28:
411 case DW_OP_reg29:
412 case DW_OP_reg30:
413 case DW_OP_reg31:
414 if (op_ptr != op_end
415 && *op_ptr != DW_OP_piece
416 && *op_ptr != DW_OP_GNU_uninit)
417 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
418 "used either alone or in conjuction with DW_OP_piece."));
419
420 result = op - DW_OP_reg0;
421 ctx->in_reg = 1;
422
423 break;
424
425 case DW_OP_regx:
426 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
427 if (op_ptr != op_end && *op_ptr != DW_OP_piece)
428 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
429 "used either alone or in conjuction with DW_OP_piece."));
430
431 result = reg;
432 ctx->in_reg = 1;
433 break;
434
435 case DW_OP_breg0:
436 case DW_OP_breg1:
437 case DW_OP_breg2:
438 case DW_OP_breg3:
439 case DW_OP_breg4:
440 case DW_OP_breg5:
441 case DW_OP_breg6:
442 case DW_OP_breg7:
443 case DW_OP_breg8:
444 case DW_OP_breg9:
445 case DW_OP_breg10:
446 case DW_OP_breg11:
447 case DW_OP_breg12:
448 case DW_OP_breg13:
449 case DW_OP_breg14:
450 case DW_OP_breg15:
451 case DW_OP_breg16:
452 case DW_OP_breg17:
453 case DW_OP_breg18:
454 case DW_OP_breg19:
455 case DW_OP_breg20:
456 case DW_OP_breg21:
457 case DW_OP_breg22:
458 case DW_OP_breg23:
459 case DW_OP_breg24:
460 case DW_OP_breg25:
461 case DW_OP_breg26:
462 case DW_OP_breg27:
463 case DW_OP_breg28:
464 case DW_OP_breg29:
465 case DW_OP_breg30:
466 case DW_OP_breg31:
467 {
468 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
469 result = (ctx->read_reg) (ctx->baton, op - DW_OP_breg0);
470 result += offset;
471 }
472 break;
473 case DW_OP_bregx:
474 {
475 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
476 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
477 result = (ctx->read_reg) (ctx->baton, reg);
478 result += offset;
479 }
480 break;
481 case DW_OP_fbreg:
482 {
483 gdb_byte *datastart;
484 size_t datalen;
485 unsigned int before_stack_len;
486
487 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
488 /* Rather than create a whole new context, we simply
489 record the stack length before execution, then reset it
490 afterwards, effectively erasing whatever the recursive
491 call put there. */
492 before_stack_len = ctx->stack_len;
493 /* FIXME: cagney/2003-03-26: This code should be using
494 get_frame_base_address(), and then implement a dwarf2
495 specific this_base method. */
496 (ctx->get_frame_base) (ctx->baton, &datastart, &datalen);
497 dwarf_expr_eval (ctx, datastart, datalen);
498 result = dwarf_expr_fetch (ctx, 0);
499 if (ctx->in_reg)
500 result = (ctx->read_reg) (ctx->baton, result);
501 result = result + offset;
502 ctx->stack_len = before_stack_len;
503 ctx->in_reg = 0;
504 }
505 break;
506 case DW_OP_dup:
507 result = dwarf_expr_fetch (ctx, 0);
508 break;
509
510 case DW_OP_drop:
511 dwarf_expr_pop (ctx);
512 goto no_push;
513
514 case DW_OP_pick:
515 offset = *op_ptr++;
516 result = dwarf_expr_fetch (ctx, offset);
517 break;
518
519 case DW_OP_over:
520 result = dwarf_expr_fetch (ctx, 1);
521 break;
522
523 case DW_OP_rot:
524 {
525 CORE_ADDR t1, t2, t3;
526
527 if (ctx->stack_len < 3)
528 error (_("Not enough elements for DW_OP_rot. Need 3, have %d."),
529 ctx->stack_len);
530 t1 = ctx->stack[ctx->stack_len - 1];
531 t2 = ctx->stack[ctx->stack_len - 2];
532 t3 = ctx->stack[ctx->stack_len - 3];
533 ctx->stack[ctx->stack_len - 1] = t2;
534 ctx->stack[ctx->stack_len - 2] = t3;
535 ctx->stack[ctx->stack_len - 3] = t1;
536 goto no_push;
537 }
538
539 case DW_OP_deref:
540 case DW_OP_deref_size:
541 case DW_OP_abs:
542 case DW_OP_neg:
543 case DW_OP_not:
544 case DW_OP_plus_uconst:
545 /* Unary operations. */
546 result = dwarf_expr_fetch (ctx, 0);
547 dwarf_expr_pop (ctx);
548
549 switch (op)
550 {
551 case DW_OP_deref:
552 {
553 gdb_byte *buf = alloca (TARGET_ADDR_BIT / TARGET_CHAR_BIT);
554 int bytes_read;
555
556 (ctx->read_mem) (ctx->baton, buf, result,
557 TARGET_ADDR_BIT / TARGET_CHAR_BIT);
558 result = dwarf2_read_address (buf,
559 buf + (TARGET_ADDR_BIT
560 / TARGET_CHAR_BIT),
561 &bytes_read);
562 }
563 break;
564
565 case DW_OP_deref_size:
566 {
567 gdb_byte *buf = alloca (TARGET_ADDR_BIT / TARGET_CHAR_BIT);
568 int bytes_read;
569
570 (ctx->read_mem) (ctx->baton, buf, result, *op_ptr++);
571 result = dwarf2_read_address (buf,
572 buf + (TARGET_ADDR_BIT
573 / TARGET_CHAR_BIT),
574 &bytes_read);
575 }
576 break;
577
578 case DW_OP_abs:
579 if ((signed int) result < 0)
580 result = -result;
581 break;
582 case DW_OP_neg:
583 result = -result;
584 break;
585 case DW_OP_not:
586 result = ~result;
587 break;
588 case DW_OP_plus_uconst:
589 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
590 result += reg;
591 break;
592 }
593 break;
594
595 case DW_OP_and:
596 case DW_OP_div:
597 case DW_OP_minus:
598 case DW_OP_mod:
599 case DW_OP_mul:
600 case DW_OP_or:
601 case DW_OP_plus:
602 case DW_OP_shl:
603 case DW_OP_shr:
604 case DW_OP_shra:
605 case DW_OP_xor:
606 case DW_OP_le:
607 case DW_OP_ge:
608 case DW_OP_eq:
609 case DW_OP_lt:
610 case DW_OP_gt:
611 case DW_OP_ne:
612 {
613 /* Binary operations. Use the value engine to do computations in
614 the right width. */
615 CORE_ADDR first, second;
616 enum exp_opcode binop;
617 struct value *val1, *val2;
618
619 second = dwarf_expr_fetch (ctx, 0);
620 dwarf_expr_pop (ctx);
621
622 first = dwarf_expr_fetch (ctx, 0);
623 dwarf_expr_pop (ctx);
624
625 val1 = value_from_longest (unsigned_address_type (), first);
626 val2 = value_from_longest (unsigned_address_type (), second);
627
628 switch (op)
629 {
630 case DW_OP_and:
631 binop = BINOP_BITWISE_AND;
632 break;
633 case DW_OP_div:
634 binop = BINOP_DIV;
635 break;
636 case DW_OP_minus:
637 binop = BINOP_SUB;
638 break;
639 case DW_OP_mod:
640 binop = BINOP_MOD;
641 break;
642 case DW_OP_mul:
643 binop = BINOP_MUL;
644 break;
645 case DW_OP_or:
646 binop = BINOP_BITWISE_IOR;
647 break;
648 case DW_OP_plus:
649 binop = BINOP_ADD;
650 break;
651 case DW_OP_shl:
652 binop = BINOP_LSH;
653 break;
654 case DW_OP_shr:
655 binop = BINOP_RSH;
656 break;
657 case DW_OP_shra:
658 binop = BINOP_RSH;
659 val1 = value_from_longest (signed_address_type (), first);
660 break;
661 case DW_OP_xor:
662 binop = BINOP_BITWISE_XOR;
663 break;
664 case DW_OP_le:
665 binop = BINOP_LEQ;
666 break;
667 case DW_OP_ge:
668 binop = BINOP_GEQ;
669 break;
670 case DW_OP_eq:
671 binop = BINOP_EQUAL;
672 break;
673 case DW_OP_lt:
674 binop = BINOP_LESS;
675 break;
676 case DW_OP_gt:
677 binop = BINOP_GTR;
678 break;
679 case DW_OP_ne:
680 binop = BINOP_NOTEQUAL;
681 break;
682 default:
683 internal_error (__FILE__, __LINE__,
684 _("Can't be reached."));
685 }
686 result = value_as_long (value_binop (val1, val2, binop));
687 }
688 break;
689
690 case DW_OP_GNU_push_tls_address:
691 /* Variable is at a constant offset in the thread-local
692 storage block into the objfile for the current thread and
693 the dynamic linker module containing this expression. Here
694 we return returns the offset from that base. The top of the
695 stack has the offset from the beginning of the thread
696 control block at which the variable is located. Nothing
697 should follow this operator, so the top of stack would be
698 returned. */
699 result = dwarf_expr_fetch (ctx, 0);
700 dwarf_expr_pop (ctx);
701 result = (ctx->get_tls_address) (ctx->baton, result);
702 break;
703
704 case DW_OP_skip:
705 offset = extract_signed_integer (op_ptr, 2);
706 op_ptr += 2;
707 op_ptr += offset;
708 goto no_push;
709
710 case DW_OP_bra:
711 offset = extract_signed_integer (op_ptr, 2);
712 op_ptr += 2;
713 if (dwarf_expr_fetch (ctx, 0) != 0)
714 op_ptr += offset;
715 dwarf_expr_pop (ctx);
716 goto no_push;
717
718 case DW_OP_nop:
719 goto no_push;
720
721 case DW_OP_piece:
722 {
723 ULONGEST size;
724 CORE_ADDR addr_or_regnum;
725
726 /* Record the piece. */
727 op_ptr = read_uleb128 (op_ptr, op_end, &size);
728 addr_or_regnum = dwarf_expr_fetch (ctx, 0);
729 add_piece (ctx, ctx->in_reg, addr_or_regnum, size);
730
731 /* Pop off the address/regnum, and clear the in_reg flag. */
732 dwarf_expr_pop (ctx);
733 ctx->in_reg = 0;
734 }
735 goto no_push;
736
737 case DW_OP_GNU_uninit:
738 if (op_ptr != op_end)
739 error (_("DWARF-2 expression error: DW_OP_GNU_unint must always "
740 "be the very last op."));
741
742 ctx->initialized = 0;
743 goto no_push;
744
745 default:
746 error (_("Unhandled dwarf expression opcode 0x%x"), op);
747 }
748
749 /* Most things push a result value. */
750 dwarf_expr_push (ctx, result);
751 no_push:;
752 }
753 }
This page took 0.044671 seconds and 3 git commands to generate.