+ /* Input names that aren't symbols but ARE valid hex numbers, when
+ the input radix permits them, can be names or numbers depending
+ on the parse. Note we support radixes > 16 here. */
+ if (!sym
+ && ((copy[0] >= 'a' && copy[0] < 'a' + input_radix - 10)
+ || (copy[0] >= 'A' && copy[0] < 'A' + input_radix - 10)))
+ {
+ YYSTYPE newlval; /* Its value is ignored. */
+ int hextype = parse_number (copy, yylval.sval.length, 0, &newlval);
+ if (hextype == INT)
+ {
+ yylval.ssym.sym = sym;
+ yylval.ssym.is_a_field_of_this = is_a_field_of_this.type != NULL;
+ return NAME_OR_INT;
+ }
+ }
+
+ /* Any other kind of symbol */
+ yylval.ssym.sym = sym;
+ yylval.ssym.is_a_field_of_this = is_a_field_of_this.type != NULL;
+
+ if (sym == NULL
+ && parse_language->la_language == language_cplus
+ && is_a_field_of_this.type == NULL
+ && !lookup_minimal_symbol (copy, NULL, NULL))
+ return UNKNOWN_CPP_NAME;
+
+ return NAME;
+}
+
+ /* Like classify_name, but used by the inner loop of the lexer, when a
+    name might have already been seen.  CONTEXT is the context type, or
+    NULL if this is the first component of a name.  Returns a token
+    code: TYPENAME, NAME, or ERROR when the name cannot be resolved in
+    CONTEXT.  */
+ 
+ static int
+ classify_inner_name (const struct block *block, struct type *context)
+ {
+   struct type *type;
+   char *copy;
+ 
+   /* With no containing type yet, this is an ordinary name lookup.  */
+   if (context == NULL)
+     return classify_name (block);
+ 
+   /* Only aggregates and namespaces can contain nested names.  */
+   type = check_typedef (context);
+   if (TYPE_CODE (type) != TYPE_CODE_STRUCT
+       && TYPE_CODE (type) != TYPE_CODE_UNION
+       && TYPE_CODE (type) != TYPE_CODE_NAMESPACE)
+     return ERROR;
+ 
+   copy = copy_name (yylval.ssym.stoken);
+   yylval.ssym.sym = cp_lookup_nested_symbol (type, copy, block);
+   if (yylval.ssym.sym == NULL)
+     return ERROR;
+ 
+   switch (SYMBOL_CLASS (yylval.ssym.sym))
+     {
+     case LOC_BLOCK:
+     case LOC_LABEL:
+       return ERROR;
+ 
+     case LOC_TYPEDEF:
+       /* Fixed a stray double semicolon on this assignment.  */
+       yylval.tsym.type = SYMBOL_TYPE (yylval.ssym.sym);
+       return TYPENAME;
+ 
+     default:
+       return NAME;
+     }
+   internal_error (__FILE__, __LINE__, _("not reached"));
+ }
+
+ /* The outer level of a two-level lexer.  This calls the inner lexer
+    to return tokens.  It then either returns these tokens, or
+    aggregates them into a larger token.  This lets us work around a
+    problem in our parsing approach, where the parser could not
+    distinguish between qualified names and qualified types at the
+    right point.
+ 
+    This approach is still not ideal, because it mishandles template
+    types.  See the comment in lex_one_token for an example.  However,
+    this is still an improvement over the earlier approach, and will
+    suffice until we move to better parsing technology.  */
+ 
+ static int
+ yylex (void)
+ {
+   token_and_value current;
+   int first_was_coloncolon, last_was_coloncolon;
+   struct type *context_type = NULL;
+   int last_to_examine, next_to_examine, checkpoint;
+   const struct block *search_block;
+ 
+   /* If we are replaying previously-buffered tokens, hand out the
+      next one directly.  */
+   if (popping && !VEC_empty (token_and_value, token_fifo))
+     goto do_pop;
+   popping = 0;
+ 
+   /* Read the first token and decide what to do.  Most of the
+      subsequent code is C++-only; but also depends on seeing a "::" or
+      name-like token.  */
+   current.token = lex_one_token ();
+   if (current.token == NAME)
+     current.token = classify_name (expression_context_block);
+   if (parse_language->la_language != language_cplus
+       || (current.token != TYPENAME && current.token != COLONCOLON
+ 	  && current.token != FILENAME))
+     return current.token;
+ 
+   /* Read any sequence of alternating "::" and name-like tokens into
+      the token FIFO.  (The mojibake "&#164;t" here was a mangled
+      "&current"; restored.)  */
+   current.value = yylval;
+   VEC_safe_push (token_and_value, token_fifo, &current);
+   last_was_coloncolon = current.token == COLONCOLON;
+   while (1)
+     {
+       current.token = lex_one_token ();
+       current.value = yylval;
+       VEC_safe_push (token_and_value, token_fifo, &current);
+ 
+       if ((last_was_coloncolon && current.token != NAME)
+ 	  || (!last_was_coloncolon && current.token != COLONCOLON))
+ 	break;
+       last_was_coloncolon = !last_was_coloncolon;
+     }
+   popping = 1;
+ 
+   /* We always read one extra token, so compute the number of tokens
+      to examine accordingly.  */
+   last_to_examine = VEC_length (token_and_value, token_fifo) - 2;
+   next_to_examine = 0;
+ 
+   current = *VEC_index (token_and_value, token_fifo, next_to_examine);
+   ++next_to_examine;
+ 
+   obstack_free (&name_obstack, obstack_base (&name_obstack));
+   checkpoint = 0;
+   if (current.token == FILENAME)
+     search_block = current.value.bval;
+   else if (current.token == COLONCOLON)
+     search_block = NULL;
+   else
+     {
+       gdb_assert (current.token == TYPENAME);
+       search_block = expression_context_block;
+       obstack_grow (&name_obstack, current.value.sval.ptr,
+ 		    current.value.sval.length);
+       context_type = current.value.tsym.type;
+       checkpoint = 1;
+     }
+ 
+   first_was_coloncolon = current.token == COLONCOLON;
+   last_was_coloncolon = first_was_coloncolon;
+ 
+   while (next_to_examine <= last_to_examine)
+     {
+       token_and_value *next;
+ 
+       next = VEC_index (token_and_value, token_fifo, next_to_examine);
+       ++next_to_examine;
+ 
+       if (next->token == NAME && last_was_coloncolon)
+ 	{
+ 	  int classification;
+ 
+ 	  yylval = next->value;
+ 	  classification = classify_inner_name (search_block, context_type);
+ 	  /* We keep going until we either run out of names, or until
+ 	     we have a qualified name which is not a type.  */
+ 	  if (classification != TYPENAME && classification != NAME)
+ 	    break;
+ 
+ 	  /* Accept up to this token.  */
+ 	  checkpoint = next_to_examine;
+ 
+ 	  /* Update the partial name we are constructing.  */
+ 	  if (context_type != NULL)
+ 	    {
+ 	      /* We don't want to put a leading "::" into the name.  */
+ 	      obstack_grow_str (&name_obstack, "::");
+ 	    }
+ 	  obstack_grow (&name_obstack, next->value.sval.ptr,
+ 			next->value.sval.length);
+ 
+ 	  yylval.sval.ptr = obstack_base (&name_obstack);
+ 	  yylval.sval.length = obstack_object_size (&name_obstack);
+ 	  current.value = yylval;
+ 	  current.token = classification;
+ 
+ 	  last_was_coloncolon = 0;
+ 
+ 	  if (classification == NAME)
+ 	    break;
+ 
+ 	  context_type = yylval.tsym.type;
+ 	}
+       else if (next->token == COLONCOLON && !last_was_coloncolon)
+ 	last_was_coloncolon = 1;
+       else
+ 	{
+ 	  /* We've reached the end of the name.  */
+ 	  break;
+ 	}
+     }
+ 
+   /* If we have a replacement token, install it as the first token in
+      the FIFO, and delete the other constituent tokens.  */
+   if (checkpoint > 0)
+     {
+       current.value.sval.ptr = obstack_copy0 (&expansion_obstack,
+ 					      current.value.sval.ptr,
+ 					      current.value.sval.length);
+ 
+       VEC_replace (token_and_value, token_fifo, 0, &current);
+       if (checkpoint > 1)
+ 	VEC_block_remove (token_and_value, token_fifo, 1, checkpoint - 1);
+     }
+ 
+  do_pop:
+   current = *VEC_index (token_and_value, token_fifo, 0);
+   VEC_ordered_remove (token_and_value, token_fifo, 0);
+   yylval = current.value;
+   return current.token;
+ }
+
+ /* Parse a C expression.  Returns the result of yyparse (0 on
+    success, nonzero on error).  Sets up macro-expansion state and
+    lexer state first, and tears everything down via cleanups.  */
+ 
+ int
+ c_parse (void)
+ {
+   int status;
+   struct cleanup *cleanups = make_cleanup (free_current_contents,
+ 					   &expression_macro_scope);
+ 
+   /* Pick the scope used for macro expansion: the current source
+      location when we have a context block, otherwise a sensible
+      default, falling back to user-defined macros.  */
+   expression_macro_scope = NULL;
+   if (expression_context_block)
+     expression_macro_scope
+       = sal_macro_scope (find_pc_line (expression_context_pc, 0));
+   else
+     expression_macro_scope = default_macro_scope ();
+   if (expression_macro_scope == NULL)
+     expression_macro_scope = user_macro_scope ();
+ 
+   /* Initialize macro expansion code.  */
+   obstack_init (&expansion_obstack);
+   gdb_assert (!macro_original_text);
+   make_cleanup (scan_macro_cleanup, 0);
+ 
+   make_cleanup_restore_integer (&yydebug);
+   yydebug = parser_debug;
+ 
+   /* Reset the state the lexer keeps between parses.  */
+   last_was_structop = 0;
+   saw_name_at_eof = 0;
+ 
+   VEC_free (token_and_value, token_fifo);
+   popping = 0;
+   obstack_init (&name_obstack);
+   make_cleanup_obstack_free (&name_obstack);
+ 
+   status = yyparse ();
+   do_cleanups (cleanups);
+   return status;
+ }
+
+/* This is called via the YYPRINT macro when parser debugging is
+ enabled. It prints a token's value. */
+
+static void
+c_print_token (FILE *file, int type, YYSTYPE value)
+{
+ switch (type)
+ {
+ case INT:
+ fprintf (file, "typed_val_int<%s, %s>",
+ TYPE_SAFE_NAME (value.typed_val_int.type),
+ pulongest (value.typed_val_int.val));
+ break;
+
+ case CHAR:
+ case STRING: