// PorygonLang — tests/parser/LexerTests.cpp
// Unit tests for the Porygon lexer, written with Catch2.
#ifdef TESTS_BUILD
#include <catch.hpp>
#include <codecvt>
#include <locale>
#include <memory>
#include <string>
#include <vector>
#include "../../src/Parser/Lexer.hpp"
using namespace Porygon::Parser;
TEST_CASE( "When at end of script return terminator", "[lexer]" ) {
    // An empty script has nothing to read, so Peek must report the null terminator.
    Lexer emptyLexer = Lexer(u"", nullptr);
    REQUIRE(emptyLexer.Peek() == '\0');
}
TEST_CASE( "Peek doesn't advance", "[lexer]" ) {
auto script = new u16string(u"5 + 5"); // Create as reference to ensure the compiler plays nice with it in release builds
Lexer lexer = Lexer(*script, nullptr);
REQUIRE(lexer.Peek() == '5');
REQUIRE(lexer.Peek() == '5');
REQUIRE(lexer.Peek() == '5');
delete script;
}
TEST_CASE( "Next does advance", "[lexer]" ) {
    // Heap-allocated script managed by RAII: the original raw new/delete leaked
    // the string whenever a REQUIRE failed (Catch2 REQUIRE throws).
    const auto script = std::make_unique<u16string>(u"5 + 5");
    Lexer lexer = Lexer(*script, nullptr);
    // Next must walk the script one character at a time and end on '\0'.
    REQUIRE(lexer.Next() == '5');
    REQUIRE(lexer.Next() == ' ');
    REQUIRE(lexer.Next() == '+');
    REQUIRE(lexer.Next() == ' ');
    REQUIRE(lexer.Next() == '5');
    REQUIRE(lexer.Next() == '\0');
}
TEST_CASE( "Lex Null Terminator as EOF", "[lexer]" ) {
    Lexer lexer = Lexer(u"", nullptr);
    // Own the token so it is released even if REQUIRE throws (the original
    // raw delete was skipped on assertion failure, leaking the token).
    const std::unique_ptr<const IToken> next(lexer.LexNext('\0'));
    REQUIRE(next -> GetKind() == TokenKind::EndOfFile);
}
TEST_CASE( "Lex Plus Token", "[lexer]" ) {
    Lexer lexer = Lexer(u"", nullptr);
    // unique_ptr releases the token even when REQUIRE throws.
    const std::unique_ptr<const IToken> next(lexer.LexNext('+'));
    REQUIRE(next -> GetKind() == TokenKind::PlusToken);
}
TEST_CASE( "Lex Minus Token", "[lexer]" ) {
    Lexer lexer = Lexer(u"", nullptr);
    // unique_ptr releases the token even when REQUIRE throws.
    const std::unique_ptr<const IToken> next(lexer.LexNext('-'));
    REQUIRE(next -> GetKind() == TokenKind::MinusToken);
}
TEST_CASE( "Lex Slash Token", "[lexer]" ) {
    Lexer lexer = Lexer(u"", nullptr);
    // unique_ptr releases the token even when REQUIRE throws.
    const std::unique_ptr<const IToken> next(lexer.LexNext('/'));
    REQUIRE(next -> GetKind() == TokenKind::SlashToken);
}
TEST_CASE( "Lex Star Token", "[lexer]" ) {
    Lexer lexer = Lexer(u"", nullptr);
    // unique_ptr releases the token even when REQUIRE throws.
    const std::unique_ptr<const IToken> next(lexer.LexNext('*'));
    REQUIRE(next -> GetKind() == TokenKind::StarToken);
}
TEST_CASE( "Lex Assignment Token", "[lexer]" ) {
    Lexer lexer = Lexer(u"", nullptr);
    // unique_ptr releases the token even when REQUIRE throws.
    const std::unique_ptr<const IToken> next(lexer.LexNext('='));
    REQUIRE(next -> GetKind() == TokenKind::AssignmentToken);
}
TEST_CASE( "Lex Equality Token", "[lexer]" ) {
    Lexer lexer = Lexer(u"==", nullptr);
    // Take ownership of the raw token pointers immediately: the original
    // delete loop ran after the REQUIREs and leaked the tokens on failure.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // "==" lexes to one EqualityToken plus the end-of-file token.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::EqualityToken);
}
TEST_CASE( "Lex Whitespace", "[lexer]" ) {
    // Each recognized blank character must lex to a WhiteSpace token.
    Lexer lexer = Lexer(u"", nullptr);
    const vector<char> blanks {' ', '\t', '\n', '\r', '\v', '\f'};
    for (const char ch: blanks){
        auto token = lexer.LexNext(ch);
        CHECK(token -> GetKind() == TokenKind::WhiteSpace);
        delete token;
    }
}
// --- Numeric literal lexing ---
TEST_CASE( "Lex Basic Digits", "[lexer]" ) {
    // Every single decimal digit character must lex to an Integer token.
    Lexer lexer = Lexer(u"", nullptr);
    for (char digit = '0'; digit <= '9'; ++digit){
        auto token = lexer.LexNext(digit);
        CHECK(token -> GetKind() == TokenKind::Integer);
        delete token;
    }
}
// Convert an integer to its UTF-16 textual representation.
// std::to_string only ever produces ASCII ('0'-'9' and '-'), so widening each
// char to char16_t is lossless; this avoids std::wstring_convert/<codecvt>,
// which are deprecated since C++17.
std::u16string to_u16string(long const &i) {
    const std::string narrow = std::to_string(i);
    return std::u16string(narrow.begin(), narrow.end());
}
TEST_CASE( "Lex Longer Integers", "[lexer]" ) {
    // Multi-digit integers of varying magnitude must lex to a single Integer
    // token whose parsed value round-trips exactly.
    long integers[] {0,1,5,9,10,50,100,1000,99999,6484,62163,48862};
    for (long integer : integers){
        auto s = to_u16string(integer);
        Lexer lexer = Lexer(s, nullptr);
        // Own the tokens so a failing REQUIRE (which throws) cannot leak them.
        std::vector<std::unique_ptr<const IToken>> tokens;
        for (auto t: lexer.Lex()){
            tokens.emplace_back(t);
        }
        REQUIRE(tokens.size() == 2);
        const IToken* firstToken = tokens[0].get();
        REQUIRE(firstToken -> GetKind() == TokenKind::Integer);
        // Cast mirrors the original; Integer kind implies IntegerToken.
        auto* integerToken = (IntegerToken *)firstToken;
        CHECK(integerToken -> GetValue() == integer);
    }
}
// Convert a double to its UTF-16 textual representation.
// std::to_string(double) formats like printf "%f" with only ASCII characters,
// so per-char widening is lossless; avoids the deprecated <codecvt> facilities.
std::u16string to_u16string(double const &i) {
    const std::string narrow = std::to_string(i);
    return std::u16string(narrow.begin(), narrow.end());
}
TEST_CASE( "Lex Floats", "[lexer]" ) {
    // Decimal literals must lex to a single Float token whose value matches
    // the source number (Approx absorbs round-trip formatting error).
    double floats[] {0.5, 0.8, 100.7, 52.3548, 8461354.1324886};
    for (double f : floats){
        auto s = to_u16string(f);
        Lexer lexer = Lexer(s, nullptr);
        // Own the tokens so a failing REQUIRE (which throws) cannot leak them.
        std::vector<std::unique_ptr<const IToken>> tokens;
        for (auto t: lexer.Lex()){
            tokens.emplace_back(t);
        }
        REQUIRE(tokens.size() == 2);
        const IToken* firstToken = tokens[0].get();
        REQUIRE(firstToken -> GetKind() == TokenKind::Float);
        // Cast mirrors the original; Float kind implies FloatToken.
        auto* floatToken = (FloatToken *)firstToken;
        CHECK(floatToken -> GetValue() == Approx(f));
    }
}
TEST_CASE( "Lex And Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"and", nullptr);
    // Own the tokens up front: the original delete loop was skipped (leaking)
    // whenever a REQUIRE threw.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::AndKeyword);
}
TEST_CASE( "Lex Break Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"break", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::BreakKeyword);
}
TEST_CASE( "Lex Do Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"do", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::DoKeyword);
}
TEST_CASE( "Lex else Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"else", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::ElseKeyword);
}
TEST_CASE( "Lex else if Keyword", "[lexer]" ) {
    // "elseif" is a single keyword, not "else" + "if".
    Lexer lexer = Lexer(u"elseif", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::ElseIfKeyword);
}
TEST_CASE( "Lex end Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"end", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::EndKeyword);
}
TEST_CASE( "Lex false Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"false", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::FalseKeyword);
}
TEST_CASE( "Lex for Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"for", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::ForKeyword);
}
TEST_CASE( "Lex function Keyword", "[lexer]" ) {
    // Script and tokens are both RAII-managed; the original raw new/delete
    // leaked on any failing REQUIRE.
    const auto s = std::make_unique<u16string>(u"function");
    Lexer lexer = Lexer(*s, nullptr);
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::FunctionKeyword);
}
TEST_CASE( "Lex if Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"if", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::IfKeyword);
}
TEST_CASE( "Lex in Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"in", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::InKeyword);
}
TEST_CASE( "Lex local Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"local", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::LocalKeyword);
}
TEST_CASE( "Lex nil Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"nil", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::NilKeyword);
}
TEST_CASE( "Lex not Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"not", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::NotKeyword);
}
TEST_CASE( "Lex or Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"or", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::OrKeyword);
}
TEST_CASE( "Lex return Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"return", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::ReturnKeyword);
}
TEST_CASE( "Lex then Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"then", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::ThenKeyword);
}
TEST_CASE( "Lex true Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"true", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::TrueKeyword);
}
TEST_CASE( "Lex while Keyword", "[lexer]" ) {
    Lexer lexer = Lexer(u"while", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    // Keyword token followed by end-of-file.
    REQUIRE(tokens.size() == 2);
    REQUIRE(tokens[0] -> GetKind() == TokenKind::WhileKeyword);
}
TEST_CASE( "Lex identifier", "[lexer]" ) {
    // A non-keyword word lexes to an Identifier carrying its hashed name.
    Lexer lexer = Lexer(u"foo", nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    REQUIRE(tokens.size() == 2);
    const IToken* firstToken = tokens[0].get();
    REQUIRE(firstToken -> GetKind() == TokenKind::Identifier);
    // Cast mirrors the original; Identifier kind implies IdentifierToken.
    REQUIRE(((IdentifierToken*)firstToken) -> GetValue() == HashedString("foo"));
}
// --- Token position tracking ---
TEST_CASE( "Lex Start Position", "[lexer]" ) {
    // "+ - bar 1234" → plus(0), minus(2), identifier(4), integer(8), EOF(12).
    const auto s = std::make_unique<u16string>(u"+ - bar 1234");
    Lexer lexer = Lexer(*s, nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    REQUIRE(tokens.size() == 5);
    // NOTE(review): casts follow the original even for non-identifier tokens;
    // GetStartPosition is presumably declared on the shared token base — confirm.
    CHECK(((IdentifierToken*)tokens[0].get()) -> GetStartPosition() == 0);
    CHECK(((IdentifierToken*)tokens[1].get()) -> GetStartPosition() == 2);
    CHECK(((IdentifierToken*)tokens[2].get()) -> GetStartPosition() == 4);
    CHECK(((IdentifierToken*)tokens[3].get()) -> GetStartPosition() == 8);
    CHECK(((IdentifierToken*)tokens[4].get()) -> GetStartPosition() == 12);
}
TEST_CASE( "Lex End Position", "[lexer]" ) {
    // "+ - bar 1234" → inclusive end offsets: plus(0), minus(2), identifier(6),
    // integer(11), EOF(12).
    const auto s = std::make_unique<u16string>(u"+ - bar 1234");
    Lexer lexer = Lexer(*s, nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    REQUIRE(tokens.size() == 5);
    // NOTE(review): casts follow the original even for non-identifier tokens;
    // GetEndPosition is presumably declared on the shared token base — confirm.
    CHECK(((IdentifierToken*)tokens[0].get()) -> GetEndPosition() == 0);
    CHECK(((IdentifierToken*)tokens[1].get()) -> GetEndPosition() == 2);
    CHECK(((IdentifierToken*)tokens[2].get()) -> GetEndPosition() == 6);
    CHECK(((IdentifierToken*)tokens[3].get()) -> GetEndPosition() == 11);
    CHECK(((IdentifierToken*)tokens[4].get()) -> GetEndPosition() == 12);
}
// --- String literal lexing ---
TEST_CASE("Lex Double Quote String", "[lexer]") {
    // A double-quoted literal lexes to a String token holding the inner text.
    const auto s = std::make_unique<u16string>(u"\"foo bar\"");
    Lexer lexer = Lexer(*s, nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    REQUIRE(tokens.size() == 2);
    const IToken* firstToken = tokens[0].get();
    REQUIRE(firstToken -> GetKind() == TokenKind::String);
    REQUIRE(((StringToken*)firstToken) -> GetValue() == u"foo bar");
}
TEST_CASE("Lex Single Quote String", "[lexer]") {
    // A single-quoted literal lexes to a String token holding the inner text.
    const auto s = std::make_unique<u16string>(u"'foo bar'");
    Lexer lexer = Lexer(*s, nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    REQUIRE(tokens.size() == 2);
    const IToken* firstToken = tokens[0].get();
    REQUIRE(firstToken -> GetKind() == TokenKind::String);
    REQUIRE(((StringToken*)firstToken) -> GetValue() == u"foo bar");
}
TEST_CASE("Lex Double Quote String, Escape Quote", "[lexer]") {
    // An escaped quote inside a literal is unescaped in the token value.
    const auto s = std::make_unique<u16string>(u"'foo\\\"bar'");
    Lexer lexer = Lexer(*s, nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    REQUIRE(tokens.size() == 2);
    const IToken* firstToken = tokens[0].get();
    REQUIRE(firstToken -> GetKind() == TokenKind::String);
    REQUIRE(((StringToken*)firstToken) -> GetValue() == u"foo\"bar");
}
TEST_CASE("Lex String with newline", "[lexer]") {
    // The "\\n" escape sequence is translated to a real newline in the value.
    const auto s = std::make_unique<u16string>(u"'foo\\nbar'");
    Lexer lexer = Lexer(*s, nullptr);
    // Own the tokens so a throwing REQUIRE cannot leak them.
    std::vector<std::unique_ptr<const IToken>> tokens;
    for (auto t: lexer.Lex()){
        tokens.emplace_back(t);
    }
    REQUIRE(tokens.size() == 2);
    const IToken* firstToken = tokens[0].get();
    REQUIRE(firstToken -> GetKind() == TokenKind::String);
    REQUIRE(((StringToken*)firstToken) -> GetValue() == u"foo\nbar");
}
#endif