#ifdef TESTS_BUILD

#include <catch2/catch.hpp>

#include <string>  // for std::to_string in the numeric literal tests

#include "Lexer.hpp"
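
// These tests exercise the Lexer in isolation: character navigation
// (Peek/Next), single-character lexing (LexNext), numeric literals,
// keywords, and identifiers.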
TEST_CASE( "When at end of script return terminator", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("");
|
||
|
REQUIRE(lexer.Peek() == '\0');
|
||
|
}
|
||
|
|
||
|
TEST_CASE( "Peek doesn't advance", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("5 + 5");
|
||
|
REQUIRE(lexer.Peek() == '5');
|
||
|
REQUIRE(lexer.Peek() == '5');
|
||
|
REQUIRE(lexer.Peek() == '5');
|
||
|
}
|
||
|
|
||
|
TEST_CASE( "Next does advance", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("5 + 5");
|
||
|
REQUIRE(lexer.Next() == '5');
|
||
|
REQUIRE(lexer.Next() == ' ');
|
||
|
REQUIRE(lexer.Next() == '+');
|
||
|
REQUIRE(lexer.Next() == ' ');
|
||
|
REQUIRE(lexer.Next() == '5');
|
||
|
REQUIRE(lexer.Next() == '\0');
|
||
|
}
|
||
|
|
||
|
TEST_CASE( "Lex Null Terminator as EOF", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("");
|
||
|
REQUIRE(lexer.LexNext('\0') -> GetKind() == TokenKind::EndOfFile);
|
||
|
}
|
||
|
|
||
|
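
// LexNext() lexes a single character in isolation, so each single-character
// operator below can be checked without building a full token stream.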
TEST_CASE( "Lex Plus Token", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("");
|
||
|
REQUIRE(lexer.LexNext('+') -> GetKind() == TokenKind::PlusToken);
|
||
|
}
|
||
|
|
||
|
TEST_CASE( "Lex Minus Token", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("");
|
||
|
REQUIRE(lexer.LexNext('-') -> GetKind() == TokenKind::MinusToken);
|
||
|
}
|
||
|
|
||
|
TEST_CASE( "Lex Slash Token", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("");
|
||
|
REQUIRE(lexer.LexNext('/') -> GetKind() == TokenKind::SlashToken);
|
||
|
}
|
||
|
|
||
|
TEST_CASE( "Lex Star Token", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("");
|
||
|
REQUIRE(lexer.LexNext('*') -> GetKind() == TokenKind::StarToken);
|
||
|
}
|
||
|
|
||
|
TEST_CASE( "Lex Assignment Token", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("");
|
||
|
REQUIRE(lexer.LexNext('=') -> GetKind() == TokenKind::AssignmentToken);
|
||
|
}
|
||
|
|
||
|
TEST_CASE( "Lex Equality Token", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("==");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::EqualityToken);
|
||
|
}
|
||
|
|
||
|
TEST_CASE( "Lex Whitespace", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("");
|
||
|
CHECK(lexer.LexNext(' ') -> GetKind() == TokenKind::WhiteSpace);
|
||
|
CHECK(lexer.LexNext('\t') -> GetKind() == TokenKind::WhiteSpace);
|
||
|
CHECK(lexer.LexNext('\n') -> GetKind() == TokenKind::WhiteSpace);
|
||
|
CHECK(lexer.LexNext('\r') -> GetKind() == TokenKind::WhiteSpace);
|
||
|
}
|
||
|
|
||
|
TEST_CASE( "Lex Basic Integers", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("");
|
||
|
CHECK(lexer.LexNext('0') -> GetKind() == TokenKind::Integer);
|
||
|
CHECK(lexer.LexNext('1') -> GetKind() == TokenKind::Integer);
|
||
|
CHECK(lexer.LexNext('2') -> GetKind() == TokenKind::Integer);
|
||
|
CHECK(lexer.LexNext('3') -> GetKind() == TokenKind::Integer);
|
||
|
CHECK(lexer.LexNext('4') -> GetKind() == TokenKind::Integer);
|
||
|
CHECK(lexer.LexNext('5') -> GetKind() == TokenKind::Integer);
|
||
|
CHECK(lexer.LexNext('6') -> GetKind() == TokenKind::Integer);
|
||
|
CHECK(lexer.LexNext('7') -> GetKind() == TokenKind::Integer);
|
||
|
CHECK(lexer.LexNext('8') -> GetKind() == TokenKind::Integer);
|
||
|
CHECK(lexer.LexNext('9') -> GetKind() == TokenKind::Integer);
|
||
|
}
|
||
|
|
||
|
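
// Multi-character numbers go through the full Lex() pass; the first token is
// downcast to its concrete token type so the parsed value can be compared.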
TEST_CASE( "Lex Longer Integers", "[lexer]" ) {
|
||
|
long integers[] {0,1,5,9,10,50,100,1000,99999,6484,62163,48862};
|
||
|
for (long integer : integers){
|
||
|
Lexer lexer = Lexer(std::to_string(integer));
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::Integer);
|
||
|
auto* integerToken = (IntegerToken *)firstToken;
|
||
|
CHECK(integerToken -> Value == integer);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
TEST_CASE( "Lex Floats", "[lexer]" ) {
|
||
|
double floats[] {0.5, 0.8, 100.7, 52.3548, 8461354.1324886};
|
||
|
for (double f : floats){
|
||
|
Lexer lexer = Lexer(std::to_string(f));
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::Float);
|
||
|
auto* floatToken = (FloatToken *)firstToken;
|
||
|
CHECK(floatToken -> Value == Approx(f));
|
||
|
}
|
||
|
}
|
||
|
|
||
|
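
// Keyword tests: every reserved word should lex to its own dedicated
// TokenKind. Each one-word script again lexes to exactly two tokens.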
TEST_CASE( "Lex And Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("and");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::AndKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex Break Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("break");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::BreakKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex Do Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("do");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::DoKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex else Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("else");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::ElseKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex else if Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("elseif");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::ElseIfKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex end Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("end");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::EndKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex false Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("false");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::FalseKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex for Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("for");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::ForKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex function Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("function");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::FunctionKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex if Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("if");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::IfKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex in Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("in");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::InKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex local Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("local");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::LocalKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex nil Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("nil");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::NilKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex not Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("not");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::NotKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex or Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("or");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::OrKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex return Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("return");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::ReturnKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex then Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("then");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::ThenKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex true Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("true");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::TrueKeyword);
|
||
|
}
|
||
|
TEST_CASE( "Lex while Keyword", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("while");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::WhileKeyword);
|
||
|
}
|
||
|
|
||
|
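
// A word that is not a reserved keyword should lex as an Identifier token
// that carries its spelling in Value.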
TEST_CASE( "Lex identifier", "[lexer]" ) {
|
||
|
Lexer lexer = Lexer("foo");
|
||
|
auto tokens = lexer.Lex();
|
||
|
REQUIRE(tokens.size() == 2);
|
||
|
IToken* firstToken = tokens[0];
|
||
|
REQUIRE(firstToken -> GetKind() == TokenKind::Identifier);
|
||
|
REQUIRE(((IdentifierToken*)firstToken) -> Value == "foo");
|
||
|
}
|
||
|
#endif
|