Add support for diagnostics

commit 2b35da3a7b
parent 26f1ed27a3
Date: 2019-05-21 13:56:08 +02:00
11 changed files with 161 additions and 68 deletions
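The only change in the test file shown below is that every Lexer is now constructed with a second argument, with the tests passing nullptr to opt out of diagnostic reporting. The diff does not show the diagnostics type itself, so the following is a minimal sketch, assuming a hypothetical DiagnosticsBag collector, of what the new two-argument constructor implied by these tests might look like.

// Minimal sketch only: "DiagnosticsBag" and its members are assumptions,
// not taken from this commit; the diff only shows a second, nullable
// constructor argument being added to Lexer.
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

class DiagnosticsBag {
public:
    // Record a message together with the source position it refers to.
    void Report(std::size_t position, std::string message) {
        messages.emplace_back(position, std::move(message));
    }
    std::vector<std::pair<std::size_t, std::string>> messages;
};

class Lexer {
public:
    // Two-argument constructor implied by the updated tests; passing
    // nullptr disables diagnostic reporting entirely.
    Lexer(std::string script, DiagnosticsBag* diagnostics)
        : script(std::move(script)), diagnostics(diagnostics) {}

private:
    std::string script;
    DiagnosticsBag* diagnostics;
};

int main() {
    DiagnosticsBag bag;
    Lexer withDiagnostics("5 +", &bag);       // caller that wants diagnostics collected
    Lexer withoutDiagnostics("5 +", nullptr); // tests opt out, as in the diff below
}

Keeping the diagnostics sink as a nullable pointer lets the existing tests stay lightweight, since most of them only assert on token kinds.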


@@ -4,19 +4,19 @@
#include "Lexer.hpp"
TEST_CASE( "When at end of script return terminator", "[lexer]" ) {
Lexer lexer = Lexer("");
Lexer lexer = Lexer("", nullptr);
REQUIRE(lexer.Peek() == '\0');
}
TEST_CASE( "Peek doesn't advance", "[lexer]" ) {
Lexer lexer = Lexer("5 + 5");
Lexer lexer = Lexer("5 + 5", nullptr);
REQUIRE(lexer.Peek() == '5');
REQUIRE(lexer.Peek() == '5');
REQUIRE(lexer.Peek() == '5');
}
TEST_CASE( "Next does advance", "[lexer]" ) {
Lexer lexer = Lexer("5 + 5");
Lexer lexer = Lexer("5 + 5", nullptr);
REQUIRE(lexer.Next() == '5');
REQUIRE(lexer.Next() == ' ');
REQUIRE(lexer.Next() == '+');
@@ -26,37 +26,37 @@ TEST_CASE( "Next does advance", "[lexer]" ) {
}
TEST_CASE( "Lex Null Terminator as EOF", "[lexer]" ) {
Lexer lexer = Lexer("");
Lexer lexer = Lexer("", nullptr);
REQUIRE(lexer.LexNext('\0') -> GetKind() == TokenKind::EndOfFile);
}
TEST_CASE( "Lex Plus Token", "[lexer]" ) {
Lexer lexer = Lexer("");
Lexer lexer = Lexer("", nullptr);
REQUIRE(lexer.LexNext('+') -> GetKind() == TokenKind::PlusToken);
}
TEST_CASE( "Lex Minus Token", "[lexer]" ) {
Lexer lexer = Lexer("");
Lexer lexer = Lexer("", nullptr);
REQUIRE(lexer.LexNext('-') -> GetKind() == TokenKind::MinusToken);
}
TEST_CASE( "Lex Slash Token", "[lexer]" ) {
Lexer lexer = Lexer("");
Lexer lexer = Lexer("", nullptr);
REQUIRE(lexer.LexNext('/') -> GetKind() == TokenKind::SlashToken);
}
TEST_CASE( "Lex Star Token", "[lexer]" ) {
Lexer lexer = Lexer("");
Lexer lexer = Lexer("", nullptr);
REQUIRE(lexer.LexNext('*') -> GetKind() == TokenKind::StarToken);
}
TEST_CASE( "Lex Assignment Token", "[lexer]" ) {
Lexer lexer = Lexer("");
Lexer lexer = Lexer("", nullptr);
REQUIRE(lexer.LexNext('=') -> GetKind() == TokenKind::AssignmentToken);
}
TEST_CASE( "Lex Equality Token", "[lexer]" ) {
Lexer lexer = Lexer("==");
Lexer lexer = Lexer("==", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
@@ -64,7 +64,7 @@ TEST_CASE( "Lex Equality Token", "[lexer]" ) {
}
TEST_CASE( "Lex Whitespace", "[lexer]" ) {
Lexer lexer = Lexer("");
Lexer lexer = Lexer("", nullptr);
CHECK(lexer.LexNext(' ') -> GetKind() == TokenKind::WhiteSpace);
CHECK(lexer.LexNext('\t') -> GetKind() == TokenKind::WhiteSpace);
CHECK(lexer.LexNext('\n') -> GetKind() == TokenKind::WhiteSpace);
@@ -74,7 +74,7 @@ TEST_CASE( "Lex Whitespace", "[lexer]" ) {
}
TEST_CASE( "Lex Basic Digits", "[lexer]" ) {
Lexer lexer = Lexer("");
Lexer lexer = Lexer("", nullptr);
CHECK(lexer.LexNext('0') -> GetKind() == TokenKind::Integer);
CHECK(lexer.LexNext('1') -> GetKind() == TokenKind::Integer);
CHECK(lexer.LexNext('2') -> GetKind() == TokenKind::Integer);
@@ -90,7 +90,7 @@ TEST_CASE( "Lex Basic Digits", "[lexer]" ) {
TEST_CASE( "Lex Longer Integers", "[lexer]" ) {
long integers[] {0,1,5,9,10,50,100,1000,99999,6484,62163,48862};
for (long integer : integers){
-Lexer lexer = Lexer(std::to_string(integer));
+Lexer lexer = Lexer(std::to_string(integer), nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
@@ -103,7 +103,7 @@ TEST_CASE( "Lex Longer Integers", "[lexer]" ) {
TEST_CASE( "Lex Floats", "[lexer]" ) {
double floats[] {0.5, 0.8, 100.7, 52.3548, 8461354.1324886};
for (double f : floats){
-Lexer lexer = Lexer(std::to_string(f));
+Lexer lexer = Lexer(std::to_string(f), nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
@@ -114,133 +114,133 @@ TEST_CASE( "Lex Floats", "[lexer]" ) {
}
TEST_CASE( "Lex And Keyword", "[lexer]" ) {
Lexer lexer = Lexer("and");
Lexer lexer = Lexer("and", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::AndKeyword);
}
TEST_CASE( "Lex Break Keyword", "[lexer]" ) {
Lexer lexer = Lexer("break");
Lexer lexer = Lexer("break", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::BreakKeyword);
}
TEST_CASE( "Lex Do Keyword", "[lexer]" ) {
Lexer lexer = Lexer("do");
Lexer lexer = Lexer("do", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::DoKeyword);
}
TEST_CASE( "Lex else Keyword", "[lexer]" ) {
Lexer lexer = Lexer("else");
Lexer lexer = Lexer("else", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::ElseKeyword);
}
TEST_CASE( "Lex else if Keyword", "[lexer]" ) {
Lexer lexer = Lexer("elseif");
Lexer lexer = Lexer("elseif", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::ElseIfKeyword);
}
TEST_CASE( "Lex end Keyword", "[lexer]" ) {
Lexer lexer = Lexer("end");
Lexer lexer = Lexer("end", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::EndKeyword);
}
TEST_CASE( "Lex false Keyword", "[lexer]" ) {
Lexer lexer = Lexer("false");
Lexer lexer = Lexer("false", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::FalseKeyword);
}
TEST_CASE( "Lex for Keyword", "[lexer]" ) {
Lexer lexer = Lexer("for");
Lexer lexer = Lexer("for", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::ForKeyword);
}
TEST_CASE( "Lex function Keyword", "[lexer]" ) {
Lexer lexer = Lexer("function");
Lexer lexer = Lexer("function", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::FunctionKeyword);
}
TEST_CASE( "Lex if Keyword", "[lexer]" ) {
Lexer lexer = Lexer("if");
Lexer lexer = Lexer("if", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::IfKeyword);
}
TEST_CASE( "Lex in Keyword", "[lexer]" ) {
Lexer lexer = Lexer("in");
Lexer lexer = Lexer("in", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::InKeyword);
}
TEST_CASE( "Lex local Keyword", "[lexer]" ) {
Lexer lexer = Lexer("local");
Lexer lexer = Lexer("local", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::LocalKeyword);
}
TEST_CASE( "Lex nil Keyword", "[lexer]" ) {
Lexer lexer = Lexer("nil");
Lexer lexer = Lexer("nil", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::NilKeyword);
}
TEST_CASE( "Lex not Keyword", "[lexer]" ) {
Lexer lexer = Lexer("not");
Lexer lexer = Lexer("not", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::NotKeyword);
}
TEST_CASE( "Lex or Keyword", "[lexer]" ) {
Lexer lexer = Lexer("or");
Lexer lexer = Lexer("or", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::OrKeyword);
}
TEST_CASE( "Lex return Keyword", "[lexer]" ) {
Lexer lexer = Lexer("return");
Lexer lexer = Lexer("return", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::ReturnKeyword);
}
TEST_CASE( "Lex then Keyword", "[lexer]" ) {
Lexer lexer = Lexer("then");
Lexer lexer = Lexer("then", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::ThenKeyword);
}
TEST_CASE( "Lex true Keyword", "[lexer]" ) {
Lexer lexer = Lexer("true");
Lexer lexer = Lexer("true", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
REQUIRE(firstToken -> GetKind() == TokenKind::TrueKeyword);
}
TEST_CASE( "Lex while Keyword", "[lexer]" ) {
Lexer lexer = Lexer("while");
Lexer lexer = Lexer("while", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
@@ -248,7 +248,7 @@ TEST_CASE( "Lex while Keyword", "[lexer]" ) {
}
TEST_CASE( "Lex identifier", "[lexer]" ) {
Lexer lexer = Lexer("foo");
Lexer lexer = Lexer("foo", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 2);
IToken* firstToken = tokens[0];
@@ -257,7 +257,7 @@ TEST_CASE( "Lex identifier", "[lexer]" ) {
}
TEST_CASE( "Lex Start Position", "[lexer]" ) {
Lexer lexer = Lexer("+ - bar 1234");
Lexer lexer = Lexer("+ - bar 1234", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 8);
CHECK(((IdentifierToken*)tokens[0]) -> GetStartPosition() == 0);
@@ -271,7 +271,7 @@ TEST_CASE( "Lex Start Position", "[lexer]" ) {
}
TEST_CASE( "Lex End Position", "[lexer]" ) {
Lexer lexer = Lexer("+ - bar 1234");
Lexer lexer = Lexer("+ - bar 1234", nullptr);
auto tokens = lexer.Lex();
REQUIRE(tokens.size() == 8);
CHECK(((IdentifierToken*)tokens[0]) -> GetEndPosition() == 0);