Add support for diagnostics
commit 2b35da3a7b (parent 26f1ed27a3)
@@ -16,7 +16,8 @@ set(SRC_FILES
         src/Parser/BinaryOperatorKind.hpp
         src/Script.cpp
         src/Script.hpp
-        )
+        src/Diagnostics/Diagnostics.hpp
+        src/Diagnostics/DiagnosticSeverity.hpp src/Diagnostics/DiagnosticCode.hpp src/Diagnostics/Diagnostic.hpp)
 
 add_library(PorygonLang ${SRC_FILES})
 
 add_executable(PorygonLangTests
@@ -0,0 +1,22 @@
+
+#ifndef PORYGONLANG_DIAGNOSTIC_HPP
+#define PORYGONLANG_DIAGNOSTIC_HPP
+
+#include "DiagnosticSeverity.hpp"
+#include "DiagnosticCode.hpp"
+
+class Diagnostic{
+    DiagnosticSeverity _severity;
+    DiagnosticCode _code;
+    unsigned int _start;
+    unsigned int _length;
+public:
+    Diagnostic(DiagnosticSeverity severity, DiagnosticCode code, unsigned int start, unsigned int length){
+        _severity = severity;
+        _code = code;
+        _start = start;
+        _length = length;
+    }
+};
+
+#endif //PORYGONLANG_DIAGNOSTIC_HPP
@@ -0,0 +1,9 @@
+
+#ifndef PORYGONLANG_DIAGNOSTICCODE_HPP
+#define PORYGONLANG_DIAGNOSTICCODE_HPP
+
+enum class DiagnosticCode{
+    UnexpectedCharacter,
+};
+
+#endif //PORYGONLANG_DIAGNOSTICCODE_HPP
@@ -0,0 +1,11 @@
+
+#ifndef PORYGONLANG_DIAGNOSTICSEVERITY_HPP
+#define PORYGONLANG_DIAGNOSTICSEVERITY_HPP
+
+enum class DiagnosticSeverity{
+    Info,
+    Warning,
+    Error,
+};
+
+#endif //PORYGONLANG_DIAGNOSTICSEVERITY_HPP
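Note on the ordering above: Diagnostics::Log (in the next file) treats anything at Error or above as fatal by comparing the scoped enum with >=. That comparison uses the enumerators' underlying values, which here follow declaration order (Info = 0, Warning = 1, Error = 2). A one-line, hypothetical compile-time check of that assumption:

    static_assert(DiagnosticSeverity::Warning < DiagnosticSeverity::Error,
                  "Log() relies on severities being declared from least to most severe");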
@@ -0,0 +1,44 @@
+
+#ifndef PORYGONLANG_DIAGNOSTICS_HPP
+#define PORYGONLANG_DIAGNOSTICS_HPP
+
+#include <vector>
+#include "DiagnosticSeverity.hpp"
+#include "DiagnosticCode.hpp"
+#include "Diagnostic.hpp"
+
+using namespace std;
+
+class Diagnostics {
+    bool _hasErrors;
+    vector<Diagnostic> _diagnostics;
+public:
+    Diagnostics(){
+        _hasErrors = false;
+    }
+
+    void Log(DiagnosticSeverity severity, DiagnosticCode code, unsigned int start, unsigned int length){
+        _diagnostics.emplace_back(severity, code, start, length);
+        if (severity >= DiagnosticSeverity::Error){
+            _hasErrors = true;
+        }
+    }
+    void LogError(DiagnosticCode code, unsigned int start, unsigned int length){
+        Log(DiagnosticSeverity::Error, code, start, length);
+    }
+
+    void LogWarning(DiagnosticCode code, unsigned int start, unsigned int length){
+        Log(DiagnosticSeverity::Warning, code, start, length);
+    }
+
+    void LogInfo(DiagnosticCode code, unsigned int start, unsigned int length){
+        Log(DiagnosticSeverity::Info, code, start, length);
+    }
+
+    bool HasErrors(){
+        return _hasErrors;
+    }
+};
+
+
+#endif //PORYGONLANG_DIAGNOSTICS_HPP
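A minimal usage sketch of the Diagnostics container added above; the include path assumes code sitting next to the src/ directory, and the positions are made up purely for illustration:

    #include "Diagnostics/Diagnostics.hpp"

    int main() {
        Diagnostics diagnostics;
        // A warning is recorded, but does not mark the collection as failed.
        diagnostics.LogWarning(DiagnosticCode::UnexpectedCharacter, 0, 1);
        // Anything at Error severity or above flips the internal flag.
        diagnostics.LogError(DiagnosticCode::UnexpectedCharacter, 4, 1);
        return diagnostics.HasErrors() ? 1 : 0;  // exits with 1 here
    }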
@@ -3,15 +3,16 @@
 #include "Lexer.hpp"
 
-Lexer::Lexer(string script) {
-    Lexer::Script = std::move(script);
-    Lexer::Position = 0;
+Lexer::Lexer(string scriptString, class Script* script) {
+    this -> ScriptString = std::move(scriptString);
+    this -> ScriptData = script;
+    this -> Position = 0;
 }
 
 vector<IToken*> Lexer::Lex() {
     vector<IToken*> tokens;
     while (true){
-        IToken* next = Lexer::LexNext(Lexer::Next());
+        IToken* next = this -> LexNext(this -> Next());
         tokens.push_back(next);
         if (next->GetKind() == TokenKind::EndOfFile)
             break;
@@ -20,9 +21,9 @@ vector<IToken*> Lexer::Lex() {
 }
 
 char Lexer::Peek(){
-    if (Lexer::Position > Lexer::Script.length())
+    if (Lexer::Position > this -> ScriptString.length())
         return '\0';
-    return Lexer::Script[Lexer::Position];
+    return this -> ScriptString[Lexer::Position];
 }
 
 char Lexer::Next(){
@@ -34,23 +35,23 @@ char Lexer::Next(){
 IToken* Lexer::LexNext(char c){
     switch (c) {
         case '\0':
-            return new SimpleToken(TokenKind::EndOfFile, Lexer::Position - 1, 1);
+            return new SimpleToken(TokenKind::EndOfFile, this -> Position - 1, 1);
         case ' ': case '\t': case '\n': case '\r': case '\v': case '\f':
-            return new SimpleToken(TokenKind::WhiteSpace, Lexer::Position - 1, 1);
+            return new SimpleToken(TokenKind::WhiteSpace, this -> Position - 1, 1);
         case '+':
-            return new SimpleToken(TokenKind::PlusToken, Lexer::Position - 1, 1);
+            return new SimpleToken(TokenKind::PlusToken, this -> Position - 1, 1);
         case '-':
-            return new SimpleToken(TokenKind::MinusToken, Lexer::Position - 1, 1);
+            return new SimpleToken(TokenKind::MinusToken, this -> Position - 1, 1);
         case '/':
-            return new SimpleToken(TokenKind::SlashToken, Lexer::Position - 1, 1);
+            return new SimpleToken(TokenKind::SlashToken, this -> Position - 1, 1);
         case '*':
-            return new SimpleToken(TokenKind::StarToken, Lexer::Position - 1, 1);
+            return new SimpleToken(TokenKind::StarToken, this -> Position - 1, 1);
         case '=':
             if (Lexer::Peek() == '='){
                 Lexer::Next();
-                return new SimpleToken(TokenKind::EqualityToken, Lexer::Position - 2, 2);
+                return new SimpleToken(TokenKind::EqualityToken, this -> Position - 2, 2);
             }
-            return new SimpleToken(TokenKind::AssignmentToken, Lexer::Position - 1, 1);
+            return new SimpleToken(TokenKind::AssignmentToken, this -> Position - 1, 1);
         case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9':
            return LexNumber(c);
         case '_':
@@ -59,7 +60,8 @@ IToken* Lexer::LexNext(char c){
             if (isalpha(c)){
                 return LexIdentifierOrKeyword(c);
             }
-            throw;
+            this -> ScriptData->Diagnostics.LogError(DiagnosticCode::UnexpectedCharacter, this -> Position - 1, 1);
+            return new SimpleToken(TokenKind::BadToken, this -> Position - 1, 1);
     }
 }
 
@@ -85,19 +87,19 @@ IToken* Lexer::LexNumber(char c){
     short decimal_index = 0;
     bool has_point = false;
     bool is_searching = true;
-    unsigned int start = Lexer::Position - 1;
+    unsigned int start = this -> Position - 1;
     unsigned int length = 1;
     while (is_searching){
-        char next = Lexer::Peek();
+        char next = this -> Peek();
         int next_val = CharToInt(next);
         if (next_val == -1){
             switch (next){
                 case '_':
-                    Lexer::Next();
+                    this -> Next();
                     length++;
                     continue;
                 case '.':
-                    Lexer::Next();
+                    this -> Next();
                     has_point = true;
                     decimal_index = 0;
                     float_value = int_value;
@@ -109,7 +111,7 @@ IToken* Lexer::LexNumber(char c){
             }
         }
         else{
-            Lexer::Next();
+            this -> Next();
             length++;
             if (has_point){
                 decimal_index++;
@@ -137,12 +139,12 @@ unsigned constexpr const_hash(char const *input) {
 
 IToken* Lexer::LexIdentifierOrKeyword(char c){
     vector<char> charVec(1, c);
-    auto start = Lexer::Position - 1;
+    auto start = this -> Position - 1;
     while (true){
-        char next = Lexer::Peek();
+        char next = this -> Peek();
         if (next == '\0') break;
         if (isalpha(next) || next == '_'){
-            Lexer::Next();
+            this -> Next();
             charVec.push_back(next);
         }
         else{
@@ -3,11 +3,12 @@
 #include <string>
 #include <vector>
 #include "Token.hpp"
+#include "../Script.hpp"
 
 using namespace std;
 
 class Lexer {
-    string Script;
+    string ScriptString;
 #ifdef TESTS_BUILD
 public:
 #endif
@@ -18,8 +19,10 @@ public:
     IToken* LexNumber(char c);
     IToken *LexIdentifierOrKeyword(char c);
 public:
+    Script* ScriptData;
+
     vector<IToken*> Lex();
-    explicit Lexer(string script);
+    explicit Lexer(string scriptString, class Script* script);
 };
 
 
@@ -4,19 +4,19 @@
 #include "Lexer.hpp"
 
 TEST_CASE( "When at end of script return terminator", "[lexer]" ) {
-    Lexer lexer = Lexer("");
+    Lexer lexer = Lexer("", nullptr);
     REQUIRE(lexer.Peek() == '\0');
 }
 
 TEST_CASE( "Peek doesn't advance", "[lexer]" ) {
-    Lexer lexer = Lexer("5 + 5");
+    Lexer lexer = Lexer("5 + 5", nullptr);
     REQUIRE(lexer.Peek() == '5');
     REQUIRE(lexer.Peek() == '5');
     REQUIRE(lexer.Peek() == '5');
 }
 
 TEST_CASE( "Next does advance", "[lexer]" ) {
-    Lexer lexer = Lexer("5 + 5");
+    Lexer lexer = Lexer("5 + 5", nullptr);
     REQUIRE(lexer.Next() == '5');
     REQUIRE(lexer.Next() == ' ');
     REQUIRE(lexer.Next() == '+');
@@ -26,37 +26,37 @@ TEST_CASE( "Next does advance", "[lexer]" ) {
 }
 
 TEST_CASE( "Lex Null Terminator as EOF", "[lexer]" ) {
-    Lexer lexer = Lexer("");
+    Lexer lexer = Lexer("", nullptr);
     REQUIRE(lexer.LexNext('\0') -> GetKind() == TokenKind::EndOfFile);
 }
 
 TEST_CASE( "Lex Plus Token", "[lexer]" ) {
-    Lexer lexer = Lexer("");
+    Lexer lexer = Lexer("", nullptr);
     REQUIRE(lexer.LexNext('+') -> GetKind() == TokenKind::PlusToken);
 }
 
 TEST_CASE( "Lex Minus Token", "[lexer]" ) {
-    Lexer lexer = Lexer("");
+    Lexer lexer = Lexer("", nullptr);
     REQUIRE(lexer.LexNext('-') -> GetKind() == TokenKind::MinusToken);
 }
 
 TEST_CASE( "Lex Slash Token", "[lexer]" ) {
-    Lexer lexer = Lexer("");
+    Lexer lexer = Lexer("", nullptr);
     REQUIRE(lexer.LexNext('/') -> GetKind() == TokenKind::SlashToken);
 }
 
 TEST_CASE( "Lex Star Token", "[lexer]" ) {
-    Lexer lexer = Lexer("");
+    Lexer lexer = Lexer("", nullptr);
     REQUIRE(lexer.LexNext('*') -> GetKind() == TokenKind::StarToken);
 }
 
 TEST_CASE( "Lex Assignment Token", "[lexer]" ) {
-    Lexer lexer = Lexer("");
+    Lexer lexer = Lexer("", nullptr);
     REQUIRE(lexer.LexNext('=') -> GetKind() == TokenKind::AssignmentToken);
 }
 
 TEST_CASE( "Lex Equality Token", "[lexer]" ) {
-    Lexer lexer = Lexer("==");
+    Lexer lexer = Lexer("==", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
@@ -64,7 +64,7 @@ TEST_CASE( "Lex Equality Token", "[lexer]" ) {
 }
 
 TEST_CASE( "Lex Whitespace", "[lexer]" ) {
-    Lexer lexer = Lexer("");
+    Lexer lexer = Lexer("", nullptr);
     CHECK(lexer.LexNext(' ') -> GetKind() == TokenKind::WhiteSpace);
     CHECK(lexer.LexNext('\t') -> GetKind() == TokenKind::WhiteSpace);
     CHECK(lexer.LexNext('\n') -> GetKind() == TokenKind::WhiteSpace);
@@ -74,7 +74,7 @@ TEST_CASE( "Lex Whitespace", "[lexer]" ) {
 }
 
 TEST_CASE( "Lex Basic Digits", "[lexer]" ) {
-    Lexer lexer = Lexer("");
+    Lexer lexer = Lexer("", nullptr);
     CHECK(lexer.LexNext('0') -> GetKind() == TokenKind::Integer);
     CHECK(lexer.LexNext('1') -> GetKind() == TokenKind::Integer);
     CHECK(lexer.LexNext('2') -> GetKind() == TokenKind::Integer);
@@ -90,7 +90,7 @@ TEST_CASE( "Lex Basic Digits", "[lexer]" ) {
 TEST_CASE( "Lex Longer Integers", "[lexer]" ) {
     long integers[] {0,1,5,9,10,50,100,1000,99999,6484,62163,48862};
     for (long integer : integers){
-        Lexer lexer = Lexer(std::to_string(integer));
+        Lexer lexer = Lexer(std::to_string(integer), nullptr);
         auto tokens = lexer.Lex();
         REQUIRE(tokens.size() == 2);
         IToken* firstToken = tokens[0];
@@ -103,7 +103,7 @@ TEST_CASE( "Lex Longer Integers", "[lexer]" ) {
 TEST_CASE( "Lex Floats", "[lexer]" ) {
     double floats[] {0.5, 0.8, 100.7, 52.3548, 8461354.1324886};
     for (double f : floats){
-        Lexer lexer = Lexer(std::to_string(f));
+        Lexer lexer = Lexer(std::to_string(f), nullptr);
         auto tokens = lexer.Lex();
         REQUIRE(tokens.size() == 2);
         IToken* firstToken = tokens[0];
@@ -114,133 +114,133 @@ TEST_CASE( "Lex Floats", "[lexer]" ) {
 }
 
 TEST_CASE( "Lex And Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("and");
+    Lexer lexer = Lexer("and", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::AndKeyword);
 }
 TEST_CASE( "Lex Break Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("break");
+    Lexer lexer = Lexer("break", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::BreakKeyword);
 }
 TEST_CASE( "Lex Do Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("do");
+    Lexer lexer = Lexer("do", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::DoKeyword);
 }
 TEST_CASE( "Lex else Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("else");
+    Lexer lexer = Lexer("else", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::ElseKeyword);
 }
 TEST_CASE( "Lex else if Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("elseif");
+    Lexer lexer = Lexer("elseif", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::ElseIfKeyword);
 }
 TEST_CASE( "Lex end Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("end");
+    Lexer lexer = Lexer("end", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::EndKeyword);
 }
 TEST_CASE( "Lex false Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("false");
+    Lexer lexer = Lexer("false", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::FalseKeyword);
 }
 TEST_CASE( "Lex for Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("for");
+    Lexer lexer = Lexer("for", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::ForKeyword);
 }
 TEST_CASE( "Lex function Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("function");
+    Lexer lexer = Lexer("function", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::FunctionKeyword);
 }
 TEST_CASE( "Lex if Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("if");
+    Lexer lexer = Lexer("if", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::IfKeyword);
 }
 TEST_CASE( "Lex in Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("in");
+    Lexer lexer = Lexer("in", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::InKeyword);
 }
 TEST_CASE( "Lex local Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("local");
+    Lexer lexer = Lexer("local", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::LocalKeyword);
 }
 TEST_CASE( "Lex nil Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("nil");
+    Lexer lexer = Lexer("nil", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::NilKeyword);
 }
 TEST_CASE( "Lex not Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("not");
+    Lexer lexer = Lexer("not", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::NotKeyword);
 }
 TEST_CASE( "Lex or Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("or");
+    Lexer lexer = Lexer("or", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::OrKeyword);
 }
 TEST_CASE( "Lex return Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("return");
+    Lexer lexer = Lexer("return", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::ReturnKeyword);
 }
 TEST_CASE( "Lex then Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("then");
+    Lexer lexer = Lexer("then", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::ThenKeyword);
 }
 TEST_CASE( "Lex true Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("true");
+    Lexer lexer = Lexer("true", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::TrueKeyword);
 }
 TEST_CASE( "Lex while Keyword", "[lexer]" ) {
-    Lexer lexer = Lexer("while");
+    Lexer lexer = Lexer("while", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
@@ -248,7 +248,7 @@ TEST_CASE( "Lex while Keyword", "[lexer]" ) {
 }
 
 TEST_CASE( "Lex identifier", "[lexer]" ) {
-    Lexer lexer = Lexer("foo");
+    Lexer lexer = Lexer("foo", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 2);
     IToken* firstToken = tokens[0];
@@ -257,7 +257,7 @@ TEST_CASE( "Lex identifier", "[lexer]" ) {
 }
 
 TEST_CASE( "Lex Start Position", "[lexer]" ) {
-    Lexer lexer = Lexer("+ - bar 1234");
+    Lexer lexer = Lexer("+ - bar 1234", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 8);
     CHECK(((IdentifierToken*)tokens[0]) -> GetStartPosition() == 0);
@@ -271,7 +271,7 @@ TEST_CASE( "Lex Start Position", "[lexer]" ) {
 }
 
 TEST_CASE( "Lex End Position", "[lexer]" ) {
-    Lexer lexer = Lexer("+ - bar 1234");
+    Lexer lexer = Lexer("+ - bar 1234", nullptr);
     auto tokens = lexer.Lex();
     REQUIRE(tokens.size() == 8);
     CHECK(((IdentifierToken*)tokens[0]) -> GetEndPosition() == 0);
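The existing lexer tests can keep passing nullptr for the new Script* argument because none of them reach the diagnostic branch. A hypothetical follow-up test for the new path is sketched below; it obtains a live Script through Script::Create (the only public way to build one, and assumed here to succeed on an empty script as it did before this change), since a lexer constructed with nullptr would dereference a null ScriptData when it tries to log:

    TEST_CASE( "Lex unknown character logs diagnostic", "[lexer]" ) {
        auto script = Script::Create("");
        Lexer lexer = Lexer("?", &script);      // '?' has no case in LexNext
        auto tokens = lexer.Lex();
        REQUIRE(tokens[0] -> GetKind() == TokenKind::BadToken);
        REQUIRE(script.Diagnostics.HasErrors());
    }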
@@ -3,6 +3,7 @@
 
 enum class TokenKind{
     EndOfFile,
+    BadToken,
     WhiteSpace,
 
     PlusToken,
@@ -2,6 +2,8 @@
 
 
 #include "Script.hpp"
+#include "Parser/Lexer.hpp"
+#include "Parser/Parser.hpp"
 
 Script Script::Create(string script) {
     auto s = Script();
@@ -10,7 +12,7 @@ Script Script::Create(string script) {
 }
 
 void Script::Parse(string script) {
-    auto lexer = Lexer(std::move(script));
+    auto lexer = Lexer(std::move(script), this);
     auto lexResult = lexer.Lex();
     auto parser = Parser(lexResult);
     auto parseResult = parser.Parse();
@@ -5,19 +5,17 @@
 #define PORYGONLANG_SCRIPT_HPP
 
 #include <string>
-#include "Parser/Lexer.hpp"
-#include "Parser/Parser.hpp"
+#include "Diagnostics/Diagnostics.hpp"
 
 using namespace std;
 
 
 class Script {
-    explicit Script(){
-    }
+    explicit Script() = default;
 
     void Parse(string script);
 public:
     static Script Create(string script);
+    Diagnostics Diagnostics;
 };
 
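Taken together, these changes give callers a non-throwing way to find out whether building a script ran into trouble: the public Diagnostics member accumulates everything logged during lexing. A hedged end-to-end sketch, assuming "5 + 5" already lexes and parses cleanly in this revision:

    auto script = Script::Create("5 + 5");
    if (!script.Diagnostics.HasErrors()) {
        // no UnexpectedCharacter (or other error) diagnostics were raised while building the script
    }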