More glorious scoped enums

Charles Dang 2022-03-21 11:00:17 -04:00
parent d09f472b1a
commit f60de6ab51
8 changed files with 199 additions and 189 deletions
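
Note (illustration, not part of the diff): every change below follows the same mechanical pattern. Each old-style unscoped enum, whose enumerators leak into the enclosing scope and convert implicitly to int, is replaced by a C++11 enum class with lowercase enumerators, and every use site switches to the qualified name. A minimal, self-contained sketch of the pattern, using the formula_access enum and an add_input-style function modelled on this commit:

#include <iostream>
#include <string>

// Before (old style): unscoped enum; enumerators live in the surrounding
// scope and convert implicitly to int.
// enum FORMULA_ACCESS_TYPE { FORMULA_READ_ONLY, FORMULA_WRITE_ONLY, FORMULA_READ_WRITE };

// After (this commit's style): scoped enum with lowercase enumerators.
enum class formula_access { read_only, write_only, read_write };

// Default arguments and call sites switch to the qualified names.
void add_input(const std::string& key, formula_access access = formula_access::read_only)
{
    std::cout << key << " -> " << static_cast<int>(access) << '\n';
}

int main()
{
    add_input("size", formula_access::read_write); // qualified at the call site
    add_input("empty");                            // uses the read_only default
    // formula_access::read_write == 2  // would not compile: no implicit int conversion
}

The gain is the usual one for scoped enums: no name collisions (see the Winnt.h note on TOKEN_TYPE further down), no silent conversion to int, and shorter enumerator names because they are always qualified by the enum's name.
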

View File

@ -133,7 +133,7 @@ protected:
return variant(tmp);
}
static inline void add_input(formula_input_vector& inputs, const std::string& key, FORMULA_ACCESS_TYPE access_type = FORMULA_READ_ONLY)
static inline void add_input(formula_input_vector& inputs, const std::string& key, formula_access access_type = formula_access::read_only)
{
inputs.emplace_back(key, access_type);
}
@ -301,7 +301,7 @@ private:
}
for(const auto& i : values_) {
add_input(inputs, i.first, FORMULA_READ_WRITE);
add_input(inputs, i.first, formula_access::read_write);
}
}

View File

@ -29,14 +29,14 @@ struct callable_die_subscriber {
virtual ~callable_die_subscriber() {}
};
enum FORMULA_ACCESS_TYPE { FORMULA_READ_ONLY, FORMULA_WRITE_ONLY, FORMULA_READ_WRITE };
enum class formula_access { read_only, write_only, read_write };
struct formula_input {
explicit formula_input(const std::string& name, FORMULA_ACCESS_TYPE access = FORMULA_READ_WRITE)
explicit formula_input(const std::string& name, formula_access access = formula_access::read_write)
: name(name), access(access) {}
std::string name;
FORMULA_ACCESS_TYPE access;
formula_access access;
};
using formula_input_vector = std::vector<formula_input>;

View File

@ -127,11 +127,11 @@ formula::formula(const std::string& text, function_symbol_table* symbols)
try {
tokens.push_back(tk::get_token(i1,i2));
tk::TOKEN_TYPE current_type = tokens.back().type;
tk::token_type current_type = tokens.back().type;
if(current_type == tk::TOKEN_WHITESPACE) {
if(current_type == tk::token_type::whitespace) {
tokens.pop_back();
} else if(current_type == tk::TOKEN_COMMENT) {
} else if(current_type == tk::token_type::comment) {
// Since we can have multiline comments, let's see how many EOL are within it
int counter = 0;
@ -144,16 +144,16 @@ formula::formula(const std::string& text, function_symbol_table* symbols)
files.back().second += counter;
tokens.pop_back();
} else if(current_type == tk::TOKEN_EOL) {
} else if(current_type == tk::token_type::eol) {
files.back().second++;
tokens.pop_back();
} else if((current_type == tk::TOKEN_KEYWORD) && (std::string(tokens.back().begin, tokens.back().end) == "fai")) {
} else if((current_type == tk::token_type::keyword) && (std::string(tokens.back().begin, tokens.back().end) == "fai")) {
fai_keyword = true;
tokens.pop_back();
} else if((current_type == tk::TOKEN_KEYWORD) && (std::string(tokens.back().begin, tokens.back().end) == "wfl")) {
} else if((current_type == tk::token_type::keyword) && (std::string(tokens.back().begin, tokens.back().end) == "wfl")) {
wfl_keyword = true;
tokens.pop_back();
} else if((current_type == tk::TOKEN_KEYWORD) && (std::string(tokens.back().begin, tokens.back().end) == "faiend")) {
} else if((current_type == tk::token_type::keyword) && (std::string(tokens.back().begin, tokens.back().end) == "faiend")) {
if(files.size() > 1) {
files.pop_back();
filenames_it = filenames.find(files.back().first);
@ -162,7 +162,7 @@ formula::formula(const std::string& text, function_symbol_table* symbols)
} else {
throw formula_error("Unexpected 'faiend' found", "", "", 0);
}
} else if((current_type == tk::TOKEN_KEYWORD) && (std::string(tokens.back().begin, tokens.back().end) == "wflend")) {
} else if((current_type == tk::token_type::keyword) && (std::string(tokens.back().begin, tokens.back().end) == "wflend")) {
if(files.size() > 1) {
files.pop_back();
filenames_it = filenames.find(files.back().first);
@ -172,7 +172,7 @@ formula::formula(const std::string& text, function_symbol_table* symbols)
throw formula_error("Unexpected 'wflend' found", "", "", 0);
}
} else if(fai_keyword || wfl_keyword) {
if(current_type == tk::TOKEN_STRING_LITERAL) {
if(current_type == tk::token_type::string_literal) {
std::string str = std::string(tokens.back().begin, tokens.back().end);
files.emplace_back(str , 1);
@ -506,10 +506,10 @@ public:
void get_inputs(formula_input_vector& inputs) const
{
add_input(inputs, "size", FORMULA_READ_WRITE);
add_input(inputs, "empty", FORMULA_READ_WRITE);
add_input(inputs, "first", FORMULA_READ_WRITE);
add_input(inputs, "last", FORMULA_READ_WRITE);
add_input(inputs, "size", formula_access::read_write);
add_input(inputs, "empty", formula_access::read_write);
add_input(inputs, "first", formula_access::read_write);
add_input(inputs, "last", formula_access::read_write);
}
variant get_value(const std::string& key) const
@ -546,8 +546,8 @@ public:
void get_inputs(formula_input_vector& inputs) const
{
add_input(inputs, "size", FORMULA_READ_WRITE);
add_input(inputs, "empty", FORMULA_READ_WRITE);
add_input(inputs, "size", formula_access::read_write);
add_input(inputs, "empty", formula_access::read_write);
for(const auto& v : map_) {
// variant_iterator does not implement operator->,
@ -1107,21 +1107,21 @@ static void parse_function_args(const tk::token* &i1, const tk::token* i2, std::
{
const tk::token* begin = i1, *end = i2; // These are used for error reporting
if(i1->type == tk::TOKEN_LPARENS) {
if(i1->type == tk::token_type::lparens) {
++i1;
} else {
throw formula_error("Invalid function definition", tokens_to_string(begin,end - 1), *i1->filename, i1->line_number);
}
while((i1-> type != tk::TOKEN_RPARENS) && (i1 != i2)) {
if(i1->type == tk::TOKEN_IDENTIFIER) {
while((i1-> type != tk::token_type::rparens) && (i1 != i2)) {
if(i1->type == tk::token_type::identifier) {
if(std::string((i1+1)->begin, (i1+1)->end) == "*") {
res->push_back(std::string(i1->begin, i1->end) + std::string("*"));
++i1;
} else {
res->push_back(std::string(i1->begin, i1->end));
}
} else if(i1->type == tk::TOKEN_COMMA) {
} else if(i1->type == tk::token_type::comma) {
//do nothing
} else {
throw formula_error("Invalid function definition", tokens_to_string(begin,end - 1), *i1->filename, i1->line_number);
@ -1130,7 +1130,7 @@ static void parse_function_args(const tk::token* &i1, const tk::token* i2, std::
++i1;
}
if(i1->type != tk::TOKEN_RPARENS) {
if(i1->type != tk::token_type::rparens) {
throw formula_error("Invalid function definition", tokens_to_string(begin,end - 1), *i1->filename, i1->line_number);
}
@ -1144,11 +1144,11 @@ static void parse_args(const tk::token* i1, const tk::token* i2,
int parens = 0;
const tk::token* beg = i1;
while(i1 != i2) {
if(i1->type == tk::TOKEN_LPARENS || i1->type == tk::TOKEN_LSQUARE ) {
if(i1->type == tk::token_type::lparens || i1->type == tk::token_type::lsquare ) {
++parens;
} else if(i1->type == tk::TOKEN_RPARENS || i1->type == tk::TOKEN_RSQUARE ) {
} else if(i1->type == tk::token_type::rparens || i1->type == tk::token_type::rsquare ) {
--parens;
} else if(i1->type == tk::TOKEN_COMMA && !parens) {
} else if(i1->type == tk::token_type::comma && !parens) {
res->push_back(parse_expression(beg, i1, symbols));
beg = i1+1;
}
@ -1170,11 +1170,11 @@ static void parse_set_args(const tk::token* i1, const tk::token* i2,
const tk::token* beg = i1;
const tk::token* begin = i1, *end = i2; // These are used for error reporting
while(i1 != i2) {
if(i1->type == tk::TOKEN_LPARENS || i1->type == tk::TOKEN_LSQUARE) {
if(i1->type == tk::token_type::lparens || i1->type == tk::token_type::lsquare) {
++parens;
} else if(i1->type == tk::TOKEN_RPARENS || i1->type == tk::TOKEN_RSQUARE) {
} else if(i1->type == tk::token_type::rparens || i1->type == tk::token_type::rsquare) {
--parens;
} else if(i1->type == tk::TOKEN_POINTER && !parens ) {
} else if(i1->type == tk::token_type::pointer && !parens ) {
if(!check_pointer) {
check_pointer = true;
res->push_back(parse_expression(beg, i1, symbols));
@ -1182,7 +1182,7 @@ static void parse_set_args(const tk::token* i1, const tk::token* i2,
} else {
throw formula_error("Too many '->' operators found", tokens_to_string(begin,end - 1), *i1->filename, i1->line_number);
}
} else if(i1->type == tk::TOKEN_COMMA && !parens ) {
} else if(i1->type == tk::token_type::comma && !parens ) {
if(check_pointer)
check_pointer = false;
else {
@ -1209,12 +1209,12 @@ static void parse_where_clauses(const tk::token* i1, const tk::token* i2, expr_t
std::string var_name;
while(i1 != i2) {
if(i1->type == tk::TOKEN_LPARENS || i1->type == tk::TOKEN_LSQUARE) {
if(i1->type == tk::token_type::lparens || i1->type == tk::token_type::lsquare) {
++parens;
} else if(i1->type == tk::TOKEN_RPARENS || i1->type == tk::TOKEN_RSQUARE) {
} else if(i1->type == tk::token_type::rparens || i1->type == tk::token_type::rsquare) {
--parens;
} else if(!parens) {
if(i1->type == tk::TOKEN_COMMA) {
if(i1->type == tk::token_type::comma) {
if(var_name.empty()) {
throw formula_error("There is 'where <expression>' but 'where name=<expression>' was needed",
tokens_to_string(begin, end - 1), *i1->filename, i1->line_number);
@ -1223,11 +1223,11 @@ static void parse_where_clauses(const tk::token* i1, const tk::token* i2, expr_t
(*res)[var_name] = parse_expression(beg, i1, symbols);
beg = i1+1;
var_name = "";
} else if(i1->type == tk::TOKEN_OPERATOR) {
} else if(i1->type == tk::token_type::operator_token) {
std::string op_name(i1->begin, i1->end);
if(op_name == "=") {
if(beg->type != tk::TOKEN_IDENTIFIER) {
if(beg->type != tk::token_type::identifier) {
if(i1 == original_i1_cached) {
throw formula_error("There is 'where <expression>' but 'where name=<expression>' was needed",
tokens_to_string(begin, end - 1), *i1->filename, i1->line_number);
@ -1275,7 +1275,7 @@ expression_ptr parse_expression(const tk::token* i1, const tk::token* i2, functi
const tk::token* begin = i1, *end = i2; // These are used for error reporting
if(i1->type == tk::TOKEN_KEYWORD && (i1 + 1)->type == tk::TOKEN_IDENTIFIER) {
if(i1->type == tk::token_type::keyword && (i1 + 1)->type == tk::token_type::identifier) {
if(std::string(i1->begin, i1->end) == "def") {
++i1;
const std::string formula_name = std::string(i1->begin, i1->end);
@ -1284,7 +1284,7 @@ expression_ptr parse_expression(const tk::token* i1, const tk::token* i2, functi
parse_function_args(++i1, i2, &args);
const tk::token* beg = i1;
while((i1 != i2) && (i1->type != tk::TOKEN_SEMICOLON)) {
while((i1 != i2) && (i1->type != tk::token_type::semicolon)) {
++i1;
}
@ -1313,11 +1313,11 @@ expression_ptr parse_expression(const tk::token* i1, const tk::token* i2, functi
bool operator_group = false;
for(const tk::token* i = i1; i != i2; ++i) {
if(i->type == tk::TOKEN_LPARENS || i->type == tk::TOKEN_LSQUARE) {
if(i->type == tk::token_type::lparens || i->type == tk::token_type::lsquare) {
++parens;
} else if(i->type == tk::TOKEN_RPARENS || i->type == tk::TOKEN_RSQUARE) {
} else if(i->type == tk::token_type::rparens || i->type == tk::token_type::rsquare) {
--parens;
} else if(parens == 0 && i->type == tk::TOKEN_OPERATOR) {
} else if(parens == 0 && i->type == tk::token_type::operator_token) {
if((!operator_group ) && (op == nullptr || operator_precedence(*op) >= operator_precedence(*i))) {
// Need special exception for exponentiation to be right-associative
if(*i->begin != '^' || op == nullptr || *op->begin != '^') {
@ -1331,29 +1331,29 @@ expression_ptr parse_expression(const tk::token* i1, const tk::token* i2, functi
}
if(op == nullptr) {
if(i1->type == tk::TOKEN_LPARENS && (i2-1)->type == tk::TOKEN_RPARENS) {
if(i1->type == tk::token_type::lparens && (i2-1)->type == tk::token_type::rparens) {
return parse_expression(i1+1,i2-1,symbols);
} else if((i2-1)->type == tk::TOKEN_RSQUARE) { //check if there is [ ] : either a list/map definition, or a operator
} else if((i2-1)->type == tk::token_type::rsquare) { //check if there is [ ] : either a list/map definition, or a operator
// First, a special case for an empty map
if(i2 - i1 == 3 && i1->type == tk::TOKEN_LSQUARE && (i1+1)->type == tk::TOKEN_POINTER) {
if(i2 - i1 == 3 && i1->type == tk::token_type::lsquare && (i1+1)->type == tk::token_type::pointer) {
return std::make_shared<map_expression>(std::vector<expression_ptr>());
}
const tk::token* tok = i2-2;
int square_parens = 0;
bool is_map = false;
while ((tok->type != tk::TOKEN_LSQUARE || square_parens) && tok != i1) {
if(tok->type == tk::TOKEN_RSQUARE) {
while ((tok->type != tk::token_type::lsquare || square_parens) && tok != i1) {
if(tok->type == tk::token_type::rsquare) {
square_parens++;
} else if(tok->type == tk::TOKEN_LSQUARE) {
} else if(tok->type == tk::token_type::lsquare) {
square_parens--;
} else if((tok->type == tk::TOKEN_POINTER) && !square_parens ) {
} else if((tok->type == tk::token_type::pointer) && !square_parens ) {
is_map = true;
}
--tok;
}
if(tok->type == tk::TOKEN_LSQUARE) {
if(tok->type == tk::token_type::lsquare) {
if(tok == i1) {
// Create a list or a map
std::vector<expression_ptr> args;
@ -1378,16 +1378,16 @@ expression_ptr parse_expression(const tk::token* i1, const tk::token* i2, functi
}
}
} else if(i2 - i1 == 1) {
if(i1->type == tk::TOKEN_KEYWORD) {
if(i1->type == tk::token_type::keyword) {
if(std::string(i1->begin, i1->end) == "functions") {
return std::make_shared<function_list_expression>(symbols);
}
} else if(i1->type == tk::TOKEN_IDENTIFIER) {
} else if(i1->type == tk::token_type::identifier) {
return std::make_shared<identifier_expression>(std::string(i1->begin, i1->end));
} else if(i1->type == tk::TOKEN_INTEGER) {
} else if(i1->type == tk::token_type::integer) {
int n = std::stoi(std::string(i1->begin, i1->end));
return std::make_shared<integer_expression>(n);
} else if(i1->type == tk::TOKEN_DECIMAL) {
} else if(i1->type == tk::token_type::decimal) {
tk::iterator dot = i1->begin;
while(*dot != '.') {
++dot;
@ -1413,19 +1413,19 @@ expression_ptr parse_expression(const tk::token* i1, const tk::token* i2, functi
}
return std::make_shared<decimal_expression>(n, f);
} else if(i1->type == tk::TOKEN_STRING_LITERAL) {
} else if(i1->type == tk::token_type::string_literal) {
return std::make_shared<string_expression>(std::string(i1->begin + 1, i1->end - 1));
}
} else if(i1->type == tk::TOKEN_IDENTIFIER &&
(i1+1)->type == tk::TOKEN_LPARENS &&
(i2-1)->type == tk::TOKEN_RPARENS)
} else if(i1->type == tk::token_type::identifier &&
(i1+1)->type == tk::token_type::lparens &&
(i2-1)->type == tk::token_type::rparens)
{
const tk::token* function_call_begin = i1, *function_call_end = i2; // These are used for error reporting
int nleft = 0, nright = 0;
for(const tk::token* i = i1; i != i2; ++i) {
if(i->type == tk::TOKEN_LPARENS) {
if(i->type == tk::token_type::lparens) {
++nleft;
} else if(i->type == tk::TOKEN_RPARENS) {
} else if(i->type == tk::token_type::rparens) {
++nright;
}
}

View File

@ -47,73 +47,73 @@ token get_token(iterator& i1, const iterator i2) {
if( *i1 >= 'A' ) {
//current character is >= 'A', limit search to the upper-half of the ASCII table
// check if we parse now TOKEN_IDENTIFIER or TOKEN_OPERATOR/KEYWORD based on string
// check if we parse now token_type::identifier or token_type::operator_token/keyword based on string
if( *i1 <= 'Z' || ( *i1 >= 'a' && *i1 <= 'z' ) || *i1 == '_' ) {
while(i1 != i2 && (std::isalpha(*i1, std::locale::classic()) || *i1 == '_'))
++i1;
int diff = i1 - it;
TOKEN_TYPE t = TOKEN_IDENTIFIER;
token_type t = token_type::identifier;
//check if this string matches any keyword or an operator
//possible operators and keywords:
// d, or, in, def, and, not, wfl, where, wflend, functions
if( diff == 1 ) {
if( *it == 'd' )
t = TOKEN_OPERATOR;
t = token_type::operator_token;
} else if( diff == 2 ) {
if( *it == 'o' && *(it+1) == 'r' )
t = TOKEN_OPERATOR;
t = token_type::operator_token;
else if( *it == 'i' && *(it+1) == 'n' )
t = TOKEN_OPERATOR;
t = token_type::operator_token;
} else if( diff == 3 ) {
if( *it == 'd' ) { //def
if( *(it+1) == 'e' && *(it+2) == 'f' )
t = TOKEN_KEYWORD;
t = token_type::keyword;
} else if( *it == 'a' ) { //and
if( *(it+1) == 'n' && *(it+2) == 'd' )
t = TOKEN_OPERATOR;
t = token_type::operator_token;
} else if( *it == 'n' ) { //not
if( *(it+1) == 'o' && *(it+2) == 't' )
t = TOKEN_OPERATOR;
t = token_type::operator_token;
} else if( *it == 'f' ) { //fai
if( *(it+1) == 'a' && *(it+2) == 'i' )
t = TOKEN_KEYWORD;
t = token_type::keyword;
} else if( *it == 'w' ) { //wfl
if( *(it+1) == 'f' && *(it+2) == 'l' )
t = TOKEN_KEYWORD;
t = token_type::keyword;
}
} else if( diff == 5 ) {
std::string s(it, i1);
if( s == "where" )
t = TOKEN_OPERATOR;
t = token_type::operator_token;
} else if( diff == 6 ) {
std::string s(it, i1);
if( s == "faiend" )
t = TOKEN_KEYWORD;
t = token_type::keyword;
else if( s == "wflend" )
t = TOKEN_KEYWORD;
t = token_type::keyword;
} else if( diff == 9 ) {
std::string s(it, i1);
if( s == "functions" )
t = TOKEN_KEYWORD;
t = token_type::keyword;
}
return token( it, i1, t);
} else {
//at this point only 3 chars left to check:
if( *i1 == '[' )
return token( it, ++i1, TOKEN_LSQUARE );
return token( it, ++i1, token_type::lsquare );
if( *i1 == ']' )
return token( it, ++i1, TOKEN_RSQUARE );
return token( it, ++i1, token_type::rsquare );
if( *i1 == '^' )
return token( it, ++i1, TOKEN_OPERATOR );
return token( it, ++i1, token_type::operator_token );
if( *i1 == '~' )
return token( it, ++i1, TOKEN_OPERATOR );
return token( it, ++i1, token_type::operator_token );
//unused characters in this range:
// \ ` { | }
@ -124,13 +124,13 @@ token get_token(iterator& i1, const iterator i2) {
//start by checking for whitespaces/end of line char
if( *i1 <= ' ' ) {
if( *i1 == '\n' ) {
return token( it, ++i1, TOKEN_EOL);
return token( it, ++i1, token_type::eol);
} else {
while( i1 != i2 && *i1 <= ' ' && *i1 != '\n' )
++i1;
return token( it, i1, TOKEN_WHITESPACE );
return token( it, i1, token_type::whitespace );
}
//try to further limit number of characters that we need to check:
} else if ( *i1 >= '0' ){
@ -158,9 +158,9 @@ token get_token(iterator& i1, const iterator i2) {
}
if( dot )
return token( it, i1, TOKEN_DECIMAL );
return token( it, i1, token_type::decimal );
else
return token( it, i1, TOKEN_INTEGER );
return token( it, i1, token_type::integer );
} else {
//current character is between ':' and '@'
@ -170,27 +170,27 @@ token get_token(iterator& i1, const iterator i2) {
// : ? @
if( *i1 == ';' ) {
return token( it, ++i1, TOKEN_SEMICOLON);
return token( it, ++i1, token_type::semicolon);
} else if( *i1 == '=' ) {
return token( it, ++i1, TOKEN_OPERATOR);
return token( it, ++i1, token_type::operator_token);
} else if( *i1 == '<' ) {
++i1;
if( i1 != i2 ) {
if( *i1 == '=' )
return token( it, ++i1, TOKEN_OPERATOR);
return token( it, ++i1, token_type::operator_token);
else
return token( it, i1, TOKEN_OPERATOR);
return token( it, i1, token_type::operator_token);
} else
return token( it, i1, TOKEN_OPERATOR);
return token( it, i1, token_type::operator_token);
} else if( *i1 == '>' ) {
++i1;
if( i1 != i2 ) {
if( *i1 == '=' )
return token( it, ++i1, TOKEN_OPERATOR);
return token( it, ++i1, token_type::operator_token);
else
return token( it, i1, TOKEN_OPERATOR);
return token( it, i1, token_type::operator_token);
} else
return token( it, i1, TOKEN_OPERATOR);
return token( it, i1, token_type::operator_token);
}
}
//current character is between '!' and '/'
@ -201,25 +201,25 @@ token get_token(iterator& i1, const iterator i2) {
// ! is used only as part of !=
// Note: " should never be used since it plays poorly with WML
} else if ( *i1 == ',' ) {
return token( it, ++i1, TOKEN_COMMA);
return token( it, ++i1, token_type::comma);
} else if ( *i1 == '.' ) {
++i1;
if( i1 != i2 ) {
if( *i1 == '+' || *i1 == '-' || *i1 == '*' || *i1 == '/' || *i1 == '.')
return token( it, ++i1, TOKEN_OPERATOR );
return token( it, ++i1, token_type::operator_token );
else
return token( it, i1, TOKEN_OPERATOR );
return token( it, i1, token_type::operator_token );
} else {
return token( it, i1, TOKEN_OPERATOR);
return token( it, i1, token_type::operator_token);
}
} else if ( *i1 == '(' ) {
return token( it, ++i1, TOKEN_LPARENS);
return token( it, ++i1, token_type::lparens);
} else if ( *i1 == ')' ) {
return token( it, ++i1, TOKEN_RPARENS);
return token( it, ++i1, token_type::rparens);
} else if ( *i1 == '\'' ) {
int bracket_depth = 0;
@ -236,7 +236,7 @@ token get_token(iterator& i1, const iterator i2) {
}
if( i1 != i2 ) {
return token( it, ++i1, TOKEN_STRING_LITERAL );
return token( it, ++i1, token_type::string_literal );
} else {
raise_exception(it, i2, "Missing closing ' for formula string");
}
@ -247,39 +247,39 @@ token get_token(iterator& i1, const iterator i2) {
++i1;
if( i1 != i2 ) {
return token( it, ++i1, TOKEN_COMMENT );
return token( it, ++i1, token_type::comment );
} else {
raise_exception(it, i2, "Missing closing # for formula comment");
}
} else if ( *i1 == '+' ) {
return token( it, ++i1, TOKEN_OPERATOR);
return token( it, ++i1, token_type::operator_token);
} else if ( *i1 == '-' ) {
++i1;
if( i1 != i2 ) {
if( *i1 == '>' )
return token( it, ++i1, TOKEN_POINTER );
return token( it, ++i1, token_type::pointer );
else
return token( it, i1, TOKEN_OPERATOR );
return token( it, i1, token_type::operator_token );
} else {
return token( it, i1, TOKEN_OPERATOR);
return token( it, i1, token_type::operator_token);
}
} else if ( *i1 == '*' ) {
return token( it, ++i1, TOKEN_OPERATOR);
return token( it, ++i1, token_type::operator_token);
} else if ( *i1 == '/' ) {
return token( it, ++i1, TOKEN_OPERATOR);
return token( it, ++i1, token_type::operator_token);
} else if ( *i1 == '%' ) {
return token( it, ++i1, TOKEN_OPERATOR);
return token( it, ++i1, token_type::operator_token);
} else if ( *i1 == '!' ) {
++i1;
if( *i1 == '=' )
return token( it, ++i1, TOKEN_OPERATOR);
return token( it, ++i1, token_type::operator_token);
else
raise_exception(it, i2, std::string() );
}

View File

@ -24,19 +24,29 @@ namespace tokenizer
typedef std::string::const_iterator iterator;
/** TOKEN_TYPE is already defined in a Winnt.h (a windows header which is included under some conditions.) */
enum TOKEN_TYPE { TOKEN_OPERATOR, TOKEN_STRING_LITERAL,
TOKEN_IDENTIFIER, TOKEN_INTEGER, TOKEN_DECIMAL,
TOKEN_LPARENS, TOKEN_RPARENS,
TOKEN_LSQUARE, TOKEN_RSQUARE,
TOKEN_COMMA, TOKEN_SEMICOLON,
TOKEN_WHITESPACE, TOKEN_EOL, TOKEN_KEYWORD,
TOKEN_COMMENT, TOKEN_POINTER };
enum class token_type {
operator_token, // Cannot simply be named 'operator' since that's a reserved C++ keyword
string_literal,
identifier,
integer,
decimal,
lparens,
rparens,
lsquare,
rsquare,
comma,
semicolon,
whitespace,
eol,
keyword,
comment,
pointer
};
struct token {
token() :
type(TOKEN_COMMENT),
type(token_type::comment),
begin(),
end(),
line_number(1),
@ -44,7 +54,7 @@ struct token {
{
}
token(iterator& i1, iterator i2, TOKEN_TYPE type) :
token(iterator& i1, iterator i2, token_type type) :
type(type),
begin(i1),
end(i2),
@ -53,7 +63,7 @@ struct token {
{
}
TOKEN_TYPE type;
token_type type;
iterator begin, end;
int line_number;
const std::string* filename;

View File

@ -124,9 +124,9 @@ std::string variant_callable::get_debug_string(formula_seen_stack& seen, bool ve
first = false;
ss << input.name << " ";
if(input.access == FORMULA_READ_WRITE) {
if(input.access == formula_access::read_write) {
ss << "(read-write) ";
} else if(input.access == FORMULA_WRITE_ONLY) {
} else if(input.access == formula_access::write_only) {
ss << "(writeonly) ";
}

View File

@ -159,7 +159,7 @@ void luaW_pushfaivariant(lua_State* L, variant val) {
obj->get_inputs(inputs);
lua_newtable(L);
for(const formula_input& attr : inputs) {
if(attr.access == FORMULA_WRITE_ONLY) {
if(attr.access == formula_access::write_only) {
continue;
}
lua_pushstring(L, attr.name.c_str());

View File

@ -650,27 +650,27 @@ class preprocessor_data : public preprocessor
/** Description of a preprocessing chunk. */
struct token_desc
{
enum TOKEN_TYPE {
START, // Toplevel
PROCESS_IF, // Processing the "if" branch of a ifdef/ifndef (the "else" branch will be skipped)
PROCESS_ELSE, // Processing the "else" branch of a ifdef/ifndef
SKIP_IF, // Skipping the "if" branch of a ifdef/ifndef (the "else" branch, if any, will be processed)
SKIP_ELSE, // Skipping the "else" branch of a ifdef/ifndef
STRING, // Processing a string
VERBATIM, // Processing a verbatim string
MACRO_SPACE, // Processing between chunks of a macro call (skip spaces)
MACRO_CHUNK, // Processing inside a chunk of a macro call (stop on space or '(')
MACRO_PARENS // Processing a parenthesized macro argument
enum class token_type {
start, // Toplevel
process_if, // Processing the "if" branch of a ifdef/ifndef (the "else" branch will be skipped)
process_else, // Processing the "else" branch of a ifdef/ifndef
skip_if, // Skipping the "if" branch of a ifdef/ifndef (the "else" branch, if any, will be processed)
skip_else, // Skipping the "else" branch of a ifdef/ifndef
string, // Processing a string
verbatim, // Processing a verbatim string
macro_space, // Processing between chunks of a macro call (skip spaces)
macro_chunk, // Processing inside a chunk of a macro call (stop on space or '(')
macro_parens // Processing a parenthesized macro argument
};
token_desc(TOKEN_TYPE type, const int stack_pos, const int linenum)
token_desc(token_type type, const int stack_pos, const int linenum)
: type(type)
, stack_pos(stack_pos)
, linenum(linenum)
{
}
TOKEN_TYPE type;
token_type type;
/** Starting position in #strings_ of the delayed text for this chunk. */
int stack_pos;
@ -723,7 +723,7 @@ class preprocessor_data : public preprocessor
void skip_spaces();
void skip_eol();
void push_token(token_desc::TOKEN_TYPE);
void push_token(token_desc::token_type);
void pop_token();
void put(char);
void put(const std::string& /*, int change_line = 0 */);
@ -747,28 +747,28 @@ public:
return is_define_ ? PARSES_MACRO : PARSES_FILE;
}
friend bool operator==(preprocessor_data::token_desc::TOKEN_TYPE, char);
friend bool operator==(char, preprocessor_data::token_desc::TOKEN_TYPE);
friend bool operator!=(preprocessor_data::token_desc::TOKEN_TYPE, char);
friend bool operator!=(char, preprocessor_data::token_desc::TOKEN_TYPE);
friend bool operator==(preprocessor_data::token_desc::token_type, char);
friend bool operator==(char, preprocessor_data::token_desc::token_type);
friend bool operator!=(preprocessor_data::token_desc::token_type, char);
friend bool operator!=(char, preprocessor_data::token_desc::token_type);
};
bool operator==(preprocessor_data::token_desc::TOKEN_TYPE, char)
bool operator==(preprocessor_data::token_desc::token_type, char)
{
throw std::logic_error("don't compare tokens with characters");
}
bool operator==(char lhs, preprocessor_data::token_desc::TOKEN_TYPE rhs)
bool operator==(char lhs, preprocessor_data::token_desc::token_type rhs)
{
return rhs == lhs;
}
bool operator!=(preprocessor_data::token_desc::TOKEN_TYPE rhs, char lhs)
bool operator!=(preprocessor_data::token_desc::token_type rhs, char lhs)
{
return !(lhs == rhs);
}
bool operator!=(char lhs, preprocessor_data::token_desc::TOKEN_TYPE rhs)
bool operator!=(char lhs, preprocessor_data::token_desc::token_type rhs)
{
return rhs != lhs;
}
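
Aside on the comparison guards just above (illustration, not part of the diff): with the old unscoped TOKEN_TYPE, an accidental expression like token.type == '(' compiled and silently compared the enumerator's integer value against the character's ASCII code; the throwing operator== overloads turn that mistake into an immediate logic_error. With the new scoped enum the comparison would not compile at all without these overloads, and keeping them preserves the loud failure if anyone reintroduces such a mix-up. A minimal sketch of the behaviour, using a simplified stand-in for token_desc::token_type:

#include <iostream>
#include <stdexcept>

// Simplified stand-in for preprocessor_data::token_desc::token_type.
enum class token_type { start, macro_parens };

// Mirrors the guard in the diff: any comparison of a token type with a char throws.
bool operator==(token_type, char)
{
    throw std::logic_error("don't compare tokens with characters");
}

int main()
{
    token_type t = token_type::macro_parens;
    try {
        if(t == ')') { std::cout << "matched\n"; } // never reached
    } catch(const std::logic_error& e) {
        std::cout << "caught: " << e.what() << '\n';
    }
}
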
@ -874,27 +874,27 @@ preprocessor_data::preprocessor_data(preprocessor_streambuf& t,
t.textdomain_ = domain;
}
push_token(token_desc::START);
push_token(token_desc::token_type::start);
}
void preprocessor_data::push_token(token_desc::TOKEN_TYPE t)
void preprocessor_data::push_token(token_desc::token_type t)
{
tokens_.emplace_back(t, strings_.size(), linenum_);
if(t == token_desc::MACRO_SPACE) {
if(t == token_desc::token_type::macro_space) {
// Macro expansions do not have any associated storage at start.
return;
} else if(t == token_desc::STRING || t == token_desc::VERBATIM) {
} else if(t == token_desc::token_type::string || t == token_desc::token_type::verbatim) {
/* Quoted strings are always inlined in the parent token. So
* they need neither storage nor metadata, unless the parent
* token is a macro expansion.
*/
token_desc::TOKEN_TYPE& outer_type = tokens_[tokens_.size() - 2].type;
if(outer_type != token_desc::MACRO_SPACE) {
token_desc::token_type& outer_type = tokens_[tokens_.size() - 2].type;
if(outer_type != token_desc::token_type::macro_space) {
return;
}
outer_type = token_desc::MACRO_CHUNK;
outer_type = token_desc::token_type::macro_chunk;
tokens_.back().stack_pos = strings_.size() + 1;
}
@ -909,39 +909,39 @@ void preprocessor_data::push_token(token_desc::TOKEN_TYPE t)
void preprocessor_data::pop_token()
{
token_desc::TOKEN_TYPE inner_type = tokens_.back().type;
token_desc::token_type inner_type = tokens_.back().type;
unsigned stack_pos = tokens_.back().stack_pos;
tokens_.pop_back();
token_desc::TOKEN_TYPE& outer_type = tokens_.back().type;
token_desc::token_type& outer_type = tokens_.back().type;
if(inner_type == token_desc::MACRO_PARENS) {
if(inner_type == token_desc::token_type::macro_parens) {
// Parenthesized macro arguments are left on the stack.
assert(outer_type == token_desc::MACRO_SPACE);
assert(outer_type == token_desc::token_type::macro_space);
return;
}
if(inner_type == token_desc::STRING || inner_type == token_desc::VERBATIM) {
if(inner_type == token_desc::token_type::string || inner_type == token_desc::token_type::verbatim) {
// Quoted strings are always inlined.
assert(stack_pos == strings_.size());
return;
}
if(outer_type == token_desc::MACRO_SPACE) {
if(outer_type == token_desc::token_type::macro_space) {
/* A macro expansion does not have any associated storage.
* Instead, storage of the inner token is not discarded
* but kept as a new macro argument. But if the inner token
* was a macro expansion, it is about to be appended, so
* prepare for it.
*/
if(inner_type == token_desc::MACRO_SPACE || inner_type == token_desc::MACRO_CHUNK) {
if(inner_type == token_desc::token_type::macro_space || inner_type == token_desc::token_type::macro_chunk) {
strings_.erase(strings_.begin() + stack_pos, strings_.end());
strings_.emplace_back();
}
assert(stack_pos + 1 == strings_.size());
outer_type = token_desc::MACRO_CHUNK;
outer_type = token_desc::token_type::macro_chunk;
return;
}
@ -1083,7 +1083,7 @@ void preprocessor_data::conditional_skip(bool skip)
++skipping_;
}
push_token(skip ? token_desc::SKIP_ELSE : token_desc::PROCESS_IF);
push_token(skip ? token_desc::token_type::skip_else : token_desc::token_type::process_if);
}
bool preprocessor_data::get_chunk()
@ -1097,25 +1097,25 @@ bool preprocessor_data::get_chunk()
char const* s;
switch(token.type) {
case token_desc::START:
case token_desc::token_type::start:
return false; // everything is fine
case token_desc::PROCESS_IF:
case token_desc::SKIP_IF:
case token_desc::PROCESS_ELSE:
case token_desc::SKIP_ELSE:
case token_desc::token_type::process_if:
case token_desc::token_type::skip_if:
case token_desc::token_type::process_else:
case token_desc::token_type::skip_else:
s = "#ifdef or #ifndef";
break;
case token_desc::STRING:
case token_desc::token_type::string:
s = "Quoted string";
break;
case token_desc::VERBATIM:
case token_desc::token_type::verbatim:
s = "Verbatim string";
break;
case token_desc::MACRO_CHUNK:
case token_desc::MACRO_SPACE:
case token_desc::token_type::macro_chunk:
case token_desc::token_type::macro_space:
s = "Macro substitution";
break;
case token_desc::MACRO_PARENS:
case token_desc::token_type::macro_parens:
s = "Macro argument";
break;
default:
@ -1145,7 +1145,7 @@ bool preprocessor_data::get_chunk()
buffer += '\n';
// line_change = 1-1 = 0
put(buffer);
} else if(token.type == token_desc::VERBATIM) {
} else if(token.type == token_desc::token_type::verbatim) {
put(c);
if(c == '>' && in_.peek() == '>') {
@ -1154,25 +1154,25 @@ bool preprocessor_data::get_chunk()
}
} else if(c == '<' && in_.peek() == '<') {
in_.get();
push_token(token_desc::VERBATIM);
push_token(token_desc::token_type::verbatim);
put('<');
put('<');
} else if(c == '"') {
if(token.type == token_desc::STRING) {
if(token.type == token_desc::token_type::string) {
parent_.quoted_ = false;
put(c);
pop_token();
} else if(!parent_.quoted_) {
parent_.quoted_ = true;
push_token(token_desc::STRING);
push_token(token_desc::token_type::string);
put(c);
} else {
parent_.error("Nested quoted string", linenum_);
}
} else if(c == '{') {
push_token(token_desc::MACRO_SPACE);
push_token(token_desc::token_type::macro_space);
++slowpath_;
} else if(c == ')' && token.type == token_desc::MACRO_PARENS) {
} else if(c == ')' && token.type == token_desc::token_type::macro_parens) {
pop_token();
} else if(c == '#' && !parent_.quoted_) {
std::string command = read_word();
@ -1371,24 +1371,24 @@ bool preprocessor_data::get_chunk()
parent_.error(err, linenum_);
}
} else if(command == "else") {
if(token.type == token_desc::SKIP_ELSE) {
if(token.type == token_desc::token_type::skip_else) {
pop_token();
--skipping_;
push_token(token_desc::PROCESS_ELSE);
} else if(token.type == token_desc::PROCESS_IF) {
push_token(token_desc::token_type::process_else);
} else if(token.type == token_desc::token_type::process_if) {
pop_token();
++skipping_;
push_token(token_desc::SKIP_IF);
push_token(token_desc::token_type::skip_if);
} else {
parent_.error("Unexpected #else", linenum_);
}
} else if(command == "endif") {
switch(token.type) {
case token_desc::SKIP_IF:
case token_desc::SKIP_ELSE:
case token_desc::token_type::skip_if:
case token_desc::token_type::skip_else:
--skipping_;
case token_desc::PROCESS_IF:
case token_desc::PROCESS_ELSE:
case token_desc::token_type::process_if:
case token_desc::token_type::process_else:
break;
default:
parent_.error("Unexpected #endif", linenum_);
@ -1447,21 +1447,21 @@ bool preprocessor_data::get_chunk()
std::string detail = read_rest_of_line();
deprecated_message(get_filename(parent_.location_), level, version, detail);
} else {
comment = token.type != token_desc::MACRO_SPACE;
comment = token.type != token_desc::token_type::macro_space;
}
skip_eol();
if(comment) {
put('\n');
}
} else if(token.type == token_desc::MACRO_SPACE || token.type == token_desc::MACRO_CHUNK) {
} else if(token.type == token_desc::token_type::macro_space || token.type == token_desc::token_type::macro_chunk) {
if(c == '(') {
// If a macro argument was started, it is implicitly ended.
token.type = token_desc::MACRO_SPACE;
push_token(token_desc::MACRO_PARENS);
token.type = token_desc::token_type::macro_space;
push_token(token_desc::token_type::macro_parens);
} else if(utils::portable_isspace(c)) {
// If a macro argument was started, it is implicitly ended.
token.type = token_desc::MACRO_SPACE;
token.type = token_desc::token_type::macro_space;
} else if(c == '}') {
--slowpath_;
if(skipping_) {
@ -1668,13 +1668,13 @@ bool preprocessor_data::get_chunk()
parent_.error("Too many nested preprocessing inclusions", linenum_);
}
} else if(!skipping_) {
if(token.type == token_desc::MACRO_SPACE) {
if(token.type == token_desc::token_type::macro_space) {
std::ostringstream s;
s << OUTPUT_SEPARATOR << "line " << linenum_ << ' ' << parent_.location_ << "\n"
<< OUTPUT_SEPARATOR << "textdomain " << parent_.textdomain_ << '\n';
strings_.push_back(s.str());
token.type = token_desc::MACRO_CHUNK;
token.type = token_desc::token_type::macro_chunk;
}
put(c);
}