Fix all valgrind leak issues in tests
Signed-off-by: Deukhoofd <deukhoofd@gmail.com>
Parent: 76b8ba3ebc
Commit: 021750a135
@@ -23,8 +23,18 @@ namespace Porygon::Evaluation {
         return v->EvaluateBool();
     }
 
-    const char16_t *EvaluateEvalValueString(EvalValue *v) {
-        return (new u16string(v->EvaluateString()))->c_str();
+    size_t GetEvalValueStringLength(EvalValue *v) {
+        auto result = v->EvaluateString();
+        return result.size();
+    }
+
+    int EvaluateEvalValueString(EvalValue *v, char16_t dst[]){
+        auto result = v->EvaluateString();
+        for (int i = 0; i < result.size(); i++){
+            dst[i] = result[i];
+        }
+        dst[result.size()] = '\0';
+        return 0;
    }
 
 
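For embedders, extracting a string from an EvalValue is now a two-call pattern: query the length, then let the library copy the characters into a caller-owned buffer, instead of the old call that leaked a heap-allocated u16string just to hand out its c_str(). A minimal caller-side sketch, assuming the two functions from the hunk above; the helper name and surrounding setup are illustrative, not part of the library:

    #include <string>

    // Hypothetical helper: wraps the new C-style string API shown above.
    std::u16string CopyEvalString(Porygon::Evaluation::EvalValue *value) {
        size_t len = GetEvalValueStringLength(value);  // ask for the length first
        auto *buffer = new char16_t[len + 1];          // caller owns the buffer
        EvaluateEvalValueString(value, buffer);        // characters are copied in and NUL-terminated
        std::u16string result(buffer, len);            // construct by length, not by scanning for the terminator
        delete[] buffer;                               // caller releases its own allocation
        return result;
    }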
@@ -52,13 +62,18 @@ namespace Porygon::Evaluation {
 
 
     TEST_CASE( "Evaluate String", "[integration]" ) {
-        auto script = Porygon::Script::Create(u"\"foo bar\"");
+        auto sc = new u16string(u"\"foo bar\"");
+        auto script = Porygon::Script::Create(*sc);
         REQUIRE(!script->Diagnostics -> HasErrors());
         script->Evaluate();
         auto lastValue = script->GetLastValue();
-        auto s = u16string(EvaluateEvalValueString(lastValue));
+        size_t size = GetEvalValueStringLength(lastValue);
+        char16_t dst[size + 1];
+        EvaluateEvalValueString(lastValue, dst);
+        auto s = u16string(dst);
         REQUIRE(s == u"foo bar");
         delete script;
+        delete sc;
     }
 
 
@@ -25,7 +25,7 @@ namespace Porygon::Evaluation{
                 : _iterator(table->GetTableIterator()), _end(table->GetTableIteratorEnd()){}
 
         shared_ptr<EvalValue> GetCurrent() final{
-            return make_shared<StringEvalValue>(_iterator->first.GetString());
+            return make_shared<StringEvalValue>(*_iterator->first.GetString());
         }
 
         bool MoveNext() final{

@@ -111,6 +111,10 @@ namespace Porygon::Parser {
             delete _expression;
         }
 
+        void NullifyExpression(){
+            _expression = nullptr;
+        }
+
         const ParsedStatementKind GetKind() const final {
             return ParsedStatementKind::Expression;
         }
@@ -525,7 +525,10 @@ namespace Porygon::Parser {
         // If the first item is an expression, and is followed by a comma, we're dealing with a simple {1, 2, 3} kind of array
         if (firstItem->GetKind() == ParsedStatementKind::Expression &&
             (this->Peek()->GetKind() == TokenKind::CommaToken)) {
-            auto expr = ((ParsedExpressionStatement *) firstItem)->GetExpression();
+            auto statement = ((ParsedExpressionStatement *) firstItem);
+            auto expr = statement->GetExpression();
+            statement->NullifyExpression();
+            delete statement;
             auto expressions = vector<const ParsedExpression *>{expr};
             auto n = this->Next(); // consume the comma
             bool hasErrors = false;
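The point of NullifyExpression above is an ownership hand-off: the temporary ParsedExpressionStatement would otherwise delete its expression in its own destructor, so the parser detaches the expression first and only then deletes the statement. A self-contained sketch of the same release-then-delete idea; Owner and Expr are stand-ins, not Porygon types:

    // Stand-in types; only the release-then-delete pattern mirrors the parser change above.
    struct Expr { int value; };

    struct Owner {
        const Expr *expression;
        explicit Owner(const Expr *e) : expression(e) {}
        ~Owner() { delete expression; }                     // an owner normally frees its expression
        const Expr *GetExpression() const { return expression; }
        void NullifyExpression() { expression = nullptr; }  // give up ownership
    };

    int main() {
        auto *statement = new Owner(new Expr{42});
        const Expr *expr = statement->GetExpression();  // keep using the expression
        statement->NullifyExpression();                 // detach so ~Owner() will not free it
        delete statement;                               // the statement goes away, the expression survives
        int v = expr->value;                            // still valid here
        delete expr;                                    // freed exactly once, by its new owner
        return v == 42 ? 0 : 1;
    }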
@@ -1,3 +1,5 @@
+#include <utility>
+
 #ifndef PORYGONLANG_USERDATA_HPP
 #define PORYGONLANG_USERDATA_HPP
 

@@ -9,23 +11,30 @@ namespace Porygon::UserData {
     class UserData {
         std::unordered_map<uint32_t, UserDataField *> _fields;
     public:
-        explicit UserData(std::unordered_map<uint32_t, UserDataField *> fields) {
-            _fields = std::move(fields);
+        explicit UserData(std::unordered_map<uint32_t, UserDataField *> fields)
+            : _fields(std::move(fields))
+        {
         }
 
-        bool ContainsField(uint32_t fieldId) {
+        ~UserData(){
+            for (auto f: _fields){
+                delete f.second;
+            }
+        }
+
+        bool ContainsField(uint32_t fieldId) const{
             return _fields.find(fieldId) != _fields.end();
         }
 
-        UserDataField *GetField(uint32_t fieldId) {
-            return _fields[fieldId];
+        UserDataField *GetField(uint32_t fieldId) const {
+            return _fields.at(fieldId);
         }
 
         void CreateField(uint32_t fieldId, UserDataField *field) {
             _fields.insert({fieldId, field});
         }
 
-        int32_t GetFieldCount() {
+        int32_t GetFieldCount() const{
             return _fields.size();
         }
     };
@@ -10,14 +10,14 @@
 
 namespace Porygon::UserData {
     class UserDataScriptType : public ScriptType {
-        shared_ptr<UserData> _userData;
+        UserData* _userData;
     public:
         explicit UserDataScriptType(uint32_t id) : ScriptType(TypeClass::UserData) {
             _userData = UserDataStorage::GetUserDataType(id);
         }
 
-        explicit UserDataScriptType(shared_ptr<UserData> ud) : ScriptType(TypeClass::UserData) {
-            _userData = std::move(ud);
+        explicit UserDataScriptType(UserData* ud) : ScriptType(TypeClass::UserData) {
+            _userData = ud;
         }
 
         const bool CanBeIndexedWith(ScriptType *indexer) const final {

@@ -3,27 +3,46 @@
 #define PORYGONLANG_USERDATASTORAGE_HPP
 
 #include <unordered_map>
+#include <mutex>
 #include "UserData.hpp"
 
 namespace Porygon::UserData {
     class UserDataStorage {
         class _internalDataStorage {
         public:
-            std::unordered_map<uint32_t, shared_ptr<UserData>> _userData;
+            std::unordered_map<uint32_t, UserData*> _userData;
+            std::mutex _userDataMutex;
+
+            ~_internalDataStorage(){
+                std::lock_guard<std::mutex> guard(_userDataMutex);
+                for (auto u: _userData){
+                    delete u.second;
+                }
+                _userData.clear();
+            }
         };
 
         static _internalDataStorage _internal;
 
     public:
         static void RegisterType(uint32_t i, UserData *ud) {
-            UserDataStorage::_internal._userData.insert({i, shared_ptr<UserData>(ud)});
+            std::lock_guard<std::mutex> guard(_internal._userDataMutex);
+            UserDataStorage::_internal._userData.insert({i, ud});
+        }
+
+        static void ClearTypes(){
+            std::lock_guard<std::mutex> guard(_internal._userDataMutex);
+            for (auto u: _internal._userData){
+                delete u.second;
+            }
+            _internal._userData.clear();
         }
 
         static bool HasUserDataType(uint32_t i) {
             return UserDataStorage::_internal._userData.find(i) != UserDataStorage::_internal._userData.end();
         }
 
-        static shared_ptr<UserData> GetUserDataType(uint32_t i) {
+        static UserData* GetUserDataType(uint32_t i) {
             return UserDataStorage::_internal._userData[i];
         }
     };
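UserDataStorage now keeps owning raw pointers behind a mutex and exposes ClearTypes() so tests can tear the registry down deterministically instead of leaving it allocated until process exit, with the _internalDataStorage destructor as a backstop. A self-contained sketch of that storage pattern under stated assumptions; Registry and Item are stand-ins, not Porygon classes:

    #include <cstdint>
    #include <mutex>
    #include <unordered_map>

    struct Item { int payload; };

    class Registry {
        class Storage {
        public:
            std::unordered_map<uint32_t, Item *> items;
            std::mutex mutex;

            ~Storage() {                                  // backstop at shutdown
                std::lock_guard<std::mutex> guard(mutex);
                for (auto entry : items) delete entry.second;
                items.clear();
            }
        };

        static Storage storage;

    public:
        static void Register(uint32_t id, Item *item) {
            std::lock_guard<std::mutex> guard(storage.mutex);
            storage.items.insert({id, item});             // the registry takes ownership
        }

        static void Clear() {                             // tests call this in teardown
            std::lock_guard<std::mutex> guard(storage.mutex);
            for (auto entry : storage.items) delete entry.second;
            storage.items.clear();
        }
    };

    Registry::Storage Registry::storage;

    int main() {
        Registry::Register(1, new Item{10});
        Registry::Clear();   // nothing left allocated for valgrind to report
        return 0;
    }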
@@ -9,11 +9,11 @@
 
 namespace Porygon::UserData {
     class UserDataValue : public Evaluation::EvalValue {
-        const shared_ptr<UserData> _userData;
+        const UserData* _userData;
         void *_obj;
     public:
-        UserDataValue(shared_ptr<UserData> userData, void *obj)
-            : _userData(std::move(userData)) {
+        UserDataValue(const UserData* userData, void *obj)
+            : _userData(userData) {
             _obj = obj;
         }
 

@@ -68,6 +68,7 @@ TEST_CASE( "True or False", "[integration]" ) {
     script->Evaluate();
     auto lastValue = script->GetLastValue();
     REQUIRE(lastValue->EvaluateBool());
+    delete script;
 }
 TEST_CASE( "False or True", "[integration]" ) {
     auto script = Script::Create("false or true");

@@ -34,6 +34,7 @@ TEST_CASE( "Integer Subtraction", "[integration]" ) {
     script->Evaluate();
     auto lastValue = script->GetLastValue();
     REQUIRE(lastValue->EvaluateInteger() == -4);
+    delete script;
 }
 TEST_CASE( "Integer Multiplication", "[integration]" ) {
     auto script = Script::Create("5 * 8");
@@ -47,7 +47,7 @@ private:
         ));
     }
 
-    static GenericFunctionScriptType* AdditionFunctionType;
+    static GenericFunctionScriptType* AdditionFunctionType();
 
     static EvalValue* GetAdditionFunction(void* obj){
         return new UserDataFunction(CallAddition, obj);

@@ -66,18 +66,19 @@ public:
             },
             {
                 HashedString::ConstHash("Addition"),
-                new UserDataField(AdditionFunctionType, GetAdditionFunction, nullptr)
+                new UserDataField(AdditionFunctionType(), GetAdditionFunction, nullptr)
             }
         });
     }
 };
 
-GenericFunctionScriptType* UserDataTestObject::AdditionFunctionType =
-        new UserDataFunctionType(make_shared<NumericScriptType>(true, false),
+GenericFunctionScriptType* UserDataTestObject::AdditionFunctionType(){
+    return new UserDataFunctionType(make_shared<NumericScriptType>(true, false),
                                  vector<shared_ptr<ScriptType>>{
                                          make_shared<NumericScriptType>(true, false),
                                          make_shared<NumericScriptType>(true, false)
                                  });
+}
 
 TEST_CASE( "Gets UserData value", "[integration]" ) {
     UserDataStorage::RegisterType(HashedString::ConstHash("testObject"), UserDataTestObject::CreateData());
@@ -88,11 +89,15 @@ end
 )");
     REQUIRE(!script->Diagnostics -> HasErrors());
     script->Evaluate();
-    auto parameter = new UserDataValue(HashedString::ConstHash("testObject"), new UserDataTestObject());
+    auto par = new UserDataTestObject();
+    auto parameter = new UserDataValue(HashedString::ConstHash("testObject"), par);
     auto variable = script->CallFunction(u"testFunc", {parameter});
     REQUIRE(variable != nullptr);
     REQUIRE(variable->EvaluateInteger() == 10);
+    delete par;
+    delete parameter;
     delete script;
+    UserDataStorage::ClearTypes();
 }
 
 TEST_CASE( "Sets UserData value", "[integration]" ) {

@@ -111,6 +116,7 @@ end
     REQUIRE(obj->foo == 5000);
     delete obj;
     delete parameter;
+    UserDataStorage::ClearTypes();
 }
 
 TEST_CASE( "Calls UserData function", "[integration]" ) {

@@ -129,6 +135,7 @@ end
     delete script;
     delete obj;
     delete parameter;
+    UserDataStorage::ClearTypes();
 }
 
 TEST_CASE( "Calls UserData function with parameters", "[integration]" ) {

@@ -147,6 +154,7 @@ end
     delete script;
     delete obj;
     delete parameter;
+    UserDataStorage::ClearTypes();
 }
 
 

@@ -16,6 +16,7 @@ TEST_CASE( "Peek doesn't advance", "[lexer]" ) {
     REQUIRE(lexer.Peek() == '5');
     REQUIRE(lexer.Peek() == '5');
     REQUIRE(lexer.Peek() == '5');
+    delete script;
 }
 
 TEST_CASE( "Next does advance", "[lexer]" ) {
@@ -27,36 +28,49 @@ TEST_CASE( "Next does advance", "[lexer]" ) {
     REQUIRE(lexer.Next() == ' ');
     REQUIRE(lexer.Next() == '5');
     REQUIRE(lexer.Next() == '\0');
+    delete script;
 }
 
 TEST_CASE( "Lex Null Terminator as EOF", "[lexer]" ) {
     Lexer lexer = Lexer(u"", nullptr);
-    REQUIRE(lexer.LexNext('\0') -> GetKind() == TokenKind::EndOfFile);
+    auto next = lexer.LexNext('\0');
+    REQUIRE(next -> GetKind() == TokenKind::EndOfFile);
+    delete next;
 }
 
 TEST_CASE( "Lex Plus Token", "[lexer]" ) {
     Lexer lexer = Lexer(u"", nullptr);
-    REQUIRE(lexer.LexNext('+') -> GetKind() == TokenKind::PlusToken);
+    auto next = lexer.LexNext('+');
+    REQUIRE(next -> GetKind() == TokenKind::PlusToken);
+    delete next;
 }
 
 TEST_CASE( "Lex Minus Token", "[lexer]" ) {
     Lexer lexer = Lexer(u"", nullptr);
-    REQUIRE(lexer.LexNext('-') -> GetKind() == TokenKind::MinusToken);
+    auto next = lexer.LexNext('-');
+    REQUIRE(next -> GetKind() == TokenKind::MinusToken);
+    delete next;
 }
 
 TEST_CASE( "Lex Slash Token", "[lexer]" ) {
     Lexer lexer = Lexer(u"", nullptr);
-    REQUIRE(lexer.LexNext('/') -> GetKind() == TokenKind::SlashToken);
+    auto next = lexer.LexNext('/');
+    REQUIRE(next -> GetKind() == TokenKind::SlashToken);
+    delete next;
 }
 
 TEST_CASE( "Lex Star Token", "[lexer]" ) {
     Lexer lexer = Lexer(u"", nullptr);
-    REQUIRE(lexer.LexNext('*') -> GetKind() == TokenKind::StarToken);
+    auto next = lexer.LexNext('*');
+    REQUIRE(next -> GetKind() == TokenKind::StarToken);
+    delete next;
 }
 
 TEST_CASE( "Lex Assignment Token", "[lexer]" ) {
     Lexer lexer = Lexer(u"", nullptr);
-    REQUIRE(lexer.LexNext('=') -> GetKind() == TokenKind::AssignmentToken);
+    auto next = lexer.LexNext('=');
+    REQUIRE(next -> GetKind() == TokenKind::AssignmentToken);
+    delete next;
 }
 
 TEST_CASE( "Lex Equality Token", "[lexer]" ) {

@@ -65,30 +79,30 @@ TEST_CASE( "Lex Equality Token", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::EqualityToken);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 
 TEST_CASE( "Lex Whitespace", "[lexer]" ) {
     Lexer lexer = Lexer(u"", nullptr);
-    CHECK(lexer.LexNext(' ') -> GetKind() == TokenKind::WhiteSpace);
-    CHECK(lexer.LexNext('\t') -> GetKind() == TokenKind::WhiteSpace);
-    CHECK(lexer.LexNext('\n') -> GetKind() == TokenKind::WhiteSpace);
-    CHECK(lexer.LexNext('\r') -> GetKind() == TokenKind::WhiteSpace);
-    CHECK(lexer.LexNext('\v') -> GetKind() == TokenKind::WhiteSpace);
-    CHECK(lexer.LexNext('\f') -> GetKind() == TokenKind::WhiteSpace);
+
+    vector<char> whitespace {' ', '\t', '\n', '\r', '\v', '\f'};
+    for (char c: whitespace){
+        auto t = lexer.LexNext(c);
+        CHECK(t -> GetKind() == TokenKind::WhiteSpace);
+        delete t;
+    }
 }
 
 TEST_CASE( "Lex Basic Digits", "[lexer]" ) {
     Lexer lexer = Lexer(u"", nullptr);
-    CHECK(lexer.LexNext('0') -> GetKind() == TokenKind::Integer);
-    CHECK(lexer.LexNext('1') -> GetKind() == TokenKind::Integer);
-    CHECK(lexer.LexNext('2') -> GetKind() == TokenKind::Integer);
-    CHECK(lexer.LexNext('3') -> GetKind() == TokenKind::Integer);
-    CHECK(lexer.LexNext('4') -> GetKind() == TokenKind::Integer);
-    CHECK(lexer.LexNext('5') -> GetKind() == TokenKind::Integer);
-    CHECK(lexer.LexNext('6') -> GetKind() == TokenKind::Integer);
-    CHECK(lexer.LexNext('7') -> GetKind() == TokenKind::Integer);
-    CHECK(lexer.LexNext('8') -> GetKind() == TokenKind::Integer);
-    CHECK(lexer.LexNext('9') -> GetKind() == TokenKind::Integer);
+    vector<char> ints {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'};
+    for (char c: ints){
+        auto t = lexer.LexNext(c);
+        CHECK(t -> GetKind() == TokenKind::Integer);
+        delete t;
+    }
 }
 
 std::u16string to_u16string(long const &i) {

@@ -107,6 +121,10 @@ TEST_CASE( "Lex Longer Integers", "[lexer]" ) {
         REQUIRE(firstToken -> GetKind() == TokenKind::Integer);
         auto* integerToken = (IntegerToken *)firstToken;
         CHECK(integerToken -> GetValue() == integer);
+
+        for (auto t: tokens){
+            delete t;
+        }
     }
 }
 

@@ -125,6 +143,10 @@ TEST_CASE( "Lex Floats", "[lexer]" ) {
         REQUIRE(firstToken -> GetKind() == TokenKind::Float);
         auto* floatToken = (FloatToken *)firstToken;
         CHECK(floatToken -> GetValue() == Approx(f));
+
+        for (auto t: tokens){
+            delete t;
+        }
     }
 }
 
@@ -134,6 +156,9 @@ TEST_CASE( "Lex And Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::AndKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex Break Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"break", nullptr);

@@ -141,6 +166,9 @@ TEST_CASE( "Lex Break Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::BreakKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex Do Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"do", nullptr);

@@ -148,6 +176,9 @@ TEST_CASE( "Lex Do Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::DoKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex else Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"else", nullptr);

@@ -155,6 +186,9 @@ TEST_CASE( "Lex else Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::ElseKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex else if Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"elseif", nullptr);

@@ -162,6 +196,9 @@ TEST_CASE( "Lex else if Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::ElseIfKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex end Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"end", nullptr);

@@ -169,6 +206,9 @@ TEST_CASE( "Lex end Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::EndKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex false Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"false", nullptr);

@@ -176,6 +216,9 @@ TEST_CASE( "Lex false Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::FalseKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex for Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"for", nullptr);

@@ -183,6 +226,9 @@ TEST_CASE( "Lex for Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::ForKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex function Keyword", "[lexer]" ) {
     auto s = new u16string(u"function");

@@ -191,6 +237,10 @@ TEST_CASE( "Lex function Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::FunctionKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
+    delete s;
 }
 TEST_CASE( "Lex if Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"if", nullptr);

@@ -198,6 +248,9 @@ TEST_CASE( "Lex if Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::IfKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex in Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"in", nullptr);

@@ -205,6 +258,9 @@ TEST_CASE( "Lex in Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::InKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex local Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"local", nullptr);

@@ -212,6 +268,9 @@ TEST_CASE( "Lex local Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::LocalKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex nil Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"nil", nullptr);

@@ -219,6 +278,9 @@ TEST_CASE( "Lex nil Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::NilKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex not Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"not", nullptr);

@@ -226,6 +288,9 @@ TEST_CASE( "Lex not Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::NotKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex or Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"or", nullptr);

@@ -233,6 +298,9 @@ TEST_CASE( "Lex or Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::OrKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex return Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"return", nullptr);

@@ -240,6 +308,9 @@ TEST_CASE( "Lex return Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::ReturnKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex then Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"then", nullptr);

@@ -247,6 +318,9 @@ TEST_CASE( "Lex then Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::ThenKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex true Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"true", nullptr);

@@ -254,6 +328,9 @@ TEST_CASE( "Lex true Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::TrueKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 TEST_CASE( "Lex while Keyword", "[lexer]" ) {
     Lexer lexer = Lexer(u"while", nullptr);

@@ -261,6 +338,9 @@ TEST_CASE( "Lex while Keyword", "[lexer]" ) {
     REQUIRE(tokens.size() == 2);
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::WhileKeyword);
+    for (auto t: tokens){
+        delete t;
+    }
 }
 
 TEST_CASE( "Lex identifier", "[lexer]" ) {
@@ -270,6 +350,9 @@ TEST_CASE( "Lex identifier", "[lexer]" ) {
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::Identifier);
     REQUIRE(((IdentifierToken*)firstToken) -> GetValue() == HashedString("foo"));
+    for (auto t: tokens){
+        delete t;
+    }
 }
 
 TEST_CASE( "Lex Start Position", "[lexer]" ) {

@@ -282,6 +365,10 @@ TEST_CASE( "Lex Start Position", "[lexer]" ) {
     CHECK(((IdentifierToken*)tokens[2]) -> GetStartPosition() == 4);
     CHECK(((IdentifierToken*)tokens[3]) -> GetStartPosition() == 8);
     CHECK(((IdentifierToken*)tokens[4]) -> GetStartPosition() == 12);
+    for (auto t: tokens){
+        delete t;
+    }
+    delete s;
 }
 
 TEST_CASE( "Lex End Position", "[lexer]" ) {

@@ -294,6 +381,10 @@ TEST_CASE( "Lex End Position", "[lexer]" ) {
     CHECK(((IdentifierToken*)tokens[2]) -> GetEndPosition() == 6);
     CHECK(((IdentifierToken*)tokens[3]) -> GetEndPosition() == 11);
     CHECK(((IdentifierToken*)tokens[4]) -> GetEndPosition() == 12);
+    for (auto t: tokens){
+        delete t;
+    }
+    delete s;
 }
 
 TEST_CASE("Lex Double Quote String", "[lexer]") {

@@ -304,6 +395,10 @@ TEST_CASE("Lex Double Quote String", "[lexer]") {
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::String);
     REQUIRE(((StringToken*)firstToken) -> GetValue() == u"foo bar");
+    for (auto t: tokens){
+        delete t;
+    }
+    delete s;
 }
 
 TEST_CASE("Lex Single Quote String", "[lexer]") {

@@ -314,6 +409,10 @@ TEST_CASE("Lex Single Quote String", "[lexer]") {
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::String);
     REQUIRE(((StringToken*)firstToken) -> GetValue() == u"foo bar");
+    for (auto t: tokens){
+        delete t;
+    }
+    delete s;
 }
 
 TEST_CASE("Lex Double Quote String, Escape Quote", "[lexer]") {

@@ -324,6 +423,10 @@ TEST_CASE("Lex Double Quote String, Escape Quote", "[lexer]") {
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::String);
     REQUIRE(((StringToken*)firstToken) -> GetValue() == u"foo\"bar");
+    for (auto t: tokens){
+        delete t;
+    }
+    delete s;
 }
 
 TEST_CASE("Lex String with newline", "[lexer]") {

@@ -334,6 +437,10 @@ TEST_CASE("Lex String with newline", "[lexer]") {
     const IToken* firstToken = tokens[0];
     REQUIRE(firstToken -> GetKind() == TokenKind::String);
     REQUIRE(((StringToken*)firstToken) -> GetValue() == u"foo\nbar");
+    for (auto t: tokens){
+        delete t;
+    }
+    delete s;
 }
 
 
@@ -8,7 +8,8 @@ using namespace Porygon::Parser;
 TEST_CASE( "Parse single true keyword", "[parser]" ) {
     vector<const IToken*> v {new SimpleToken(TokenKind::TrueKeyword,0,0), new SimpleToken(TokenKind::EndOfFile,0,0)};
     Parser parser = Parser(v, nullptr);
-    auto parsedStatements = parser.Parse() -> GetStatements();
+    auto parsedScript = parser.Parse();
+    auto parsedStatements = parsedScript -> GetStatements();
     REQUIRE(parsedStatements->size() == 1);
     auto firstStatement = parsedStatements -> at(0);
     REQUIRE(firstStatement -> GetKind() == ParsedStatementKind::Expression);
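The parser tests now keep the result of parser.Parse() in its own variable: the earlier one-liner parser.Parse() -> GetStatements() dropped the ParsedScript pointer, so the parse tree could never be freed. Keeping it lets each test delete both the tree and the tokens it created. A hedged sketch of that pattern as a plain function, mirroring the test changes around it; Parser, IToken, SimpleToken and TokenKind are the Porygon::Parser names used in these hunks, the function itself is illustrative:

    // Illustrative only; mirrors the structure of the parser tests in this commit.
    void ParseAndClean() {
        vector<const IToken*> v {new SimpleToken(TokenKind::TrueKeyword,0,0),
                                 new SimpleToken(TokenKind::EndOfFile,0,0)};
        Parser parser = Parser(v, nullptr);
        auto parsedScript = parser.Parse();      // keep the root so it can be freed later
        // ... assertions against parsedScript->GetStatements() ...
        for (auto t : v) {
            delete t;                            // the test allocated these tokens, so the test frees them
        }
        delete parsedScript;                     // frees the parsed tree
    }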
@@ -16,12 +17,18 @@ TEST_CASE( "Parse single true keyword", "[parser]" ) {
     REQUIRE(expression -> GetKind() == ParsedExpressionKind::LiteralBool);
     auto boolean = ((LiteralBoolExpression*)expression);
     REQUIRE(boolean->GetValue());
+
+    for (auto t : v){
+        delete t;
+    }
+    delete parsedScript;
 }
 
 TEST_CASE( "Parse single false keyword", "[parser]" ) {
     vector<const IToken*> v {new SimpleToken(TokenKind::FalseKeyword,0,0), new SimpleToken(TokenKind::EndOfFile,0,0)};
     Parser parser = Parser(v, nullptr);
-    auto parsedStatements = parser.Parse() -> GetStatements();
+    auto parsedScript = parser.Parse();
+    auto parsedStatements = parsedScript -> GetStatements();
     REQUIRE(parsedStatements->size() == 1);
     auto firstStatement = parsedStatements -> at(0);
     REQUIRE(firstStatement -> GetKind() == ParsedStatementKind::Expression);

@@ -29,6 +36,11 @@ TEST_CASE( "Parse single false keyword", "[parser]" ) {
     REQUIRE(expression -> GetKind() == ParsedExpressionKind::LiteralBool);
     auto boolean = ((LiteralBoolExpression*)expression);
     REQUIRE_FALSE(boolean->GetValue());
+
+    for (auto t : v){
+        delete t;
+    }
+    delete parsedScript;
 }
 
 TEST_CASE( "Parse simple addition", "[parser]" ) {

@@ -39,7 +51,8 @@ TEST_CASE( "Parse simple addition", "[parser]" ) {
         new SimpleToken(TokenKind::EndOfFile,0,0)
     };
     Parser parser = Parser(v, nullptr);
-    auto parsedStatements = parser.Parse() -> GetStatements();
+    auto parsedScript = parser.Parse();
+    auto parsedStatements = parsedScript -> GetStatements();
     REQUIRE(parsedStatements->size() == 1);
     auto firstStatement = parsedStatements -> at(0);
     REQUIRE(firstStatement -> GetKind() == ParsedStatementKind::Expression);

@@ -53,6 +66,11 @@ TEST_CASE( "Parse simple addition", "[parser]" ) {
     REQUIRE(right->GetKind() == ParsedExpressionKind::LiteralInteger);
     CHECK(((LiteralIntegerExpression*)left)->GetValue() == 5);
     CHECK(((LiteralIntegerExpression*)right)->GetValue() == 10);
+
+    for (auto t : v){
+        delete t;
+    }
+    delete parsedScript;
 }
 
 TEST_CASE( "Parse simple negation", "[parser]" ) {

@@ -62,7 +80,8 @@ TEST_CASE( "Parse simple negation", "[parser]" ) {
         new SimpleToken(TokenKind::EndOfFile,0,0)
     };
     Parser parser = Parser(v, nullptr);
-    auto parsedStatements = parser.Parse() -> GetStatements();
+    auto parsedScript = parser.Parse();
+    auto parsedStatements = parsedScript -> GetStatements();
     REQUIRE(parsedStatements->size() == 1);
     auto firstStatement = parsedStatements -> at(0);
     REQUIRE(firstStatement -> GetKind() == ParsedStatementKind::Expression);

@@ -73,6 +92,10 @@ TEST_CASE( "Parse simple negation", "[parser]" ) {
     auto operand = unary->GetOperand();
     REQUIRE(operand->GetKind() == ParsedExpressionKind::LiteralInteger);
     CHECK(((LiteralIntegerExpression*)operand)->GetValue() == 10);
+    for (auto t : v){
+        delete t;
+    }
+    delete parsedScript;
 }
 
 TEST_CASE( "Parse logical negation", "[parser]" ) {

@@ -82,7 +105,8 @@ TEST_CASE( "Parse logical negation", "[parser]" ) {
         new SimpleToken(TokenKind::EndOfFile,0,0)
     };
     Parser parser = Parser(v, nullptr);
-    auto parsedStatements = parser.Parse() -> GetStatements();
+    auto parsedScript = parser.Parse();
+    auto parsedStatements = parsedScript -> GetStatements();
     REQUIRE(parsedStatements->size() == 1);
     auto firstStatement = parsedStatements -> at(0);
     REQUIRE(firstStatement -> GetKind() == ParsedStatementKind::Expression);

@@ -93,6 +117,10 @@ TEST_CASE( "Parse logical negation", "[parser]" ) {
     auto operand = unary->GetOperand();
     REQUIRE(operand->GetKind() == ParsedExpressionKind::LiteralBool);
     CHECK_FALSE(((LiteralBoolExpression*)operand)->GetValue());
+    for (auto t : v){
+        delete t;
+    }
+    delete parsedScript;
 }
 
 TEST_CASE( "Are parenthesized expressions valid", "[parser]" ) {

@@ -105,7 +133,8 @@ TEST_CASE( "Are parenthesized expressions valid", "[parser]" ) {
         new SimpleToken(TokenKind::EndOfFile,0,0)
     };
     Parser parser = Parser(v, nullptr);
-    auto parsedStatements = parser.Parse() -> GetStatements();
+    auto parsedScript = parser.Parse();
+    auto parsedStatements = parsedScript -> GetStatements();
     REQUIRE(parsedStatements->size() == 1);
     auto firstStatement = parsedStatements -> at(0);
     REQUIRE(firstStatement -> GetKind() == ParsedStatementKind::Expression);

@@ -122,6 +151,11 @@ TEST_CASE( "Are parenthesized expressions valid", "[parser]" ) {
     right = ((BinaryExpression*)right)->GetRight();
     CHECK(((LiteralIntegerExpression*)left)->GetValue() == 10);
     CHECK(((LiteralIntegerExpression*)right)->GetValue() == 6);
+
+    for (auto t : v){
+        delete t;
+    }
+    delete parsedScript;
 }
 
 TEST_CASE( "Assert binary precedence", "[parser]" ) {

@@ -132,7 +166,8 @@ TEST_CASE( "Assert binary precedence", "[parser]" ) {
         new SimpleToken(TokenKind::EndOfFile,0,0)
     };
     Parser parser = Parser(v, nullptr);
-    auto parsedStatements = parser.Parse() -> GetStatements();
+    auto parsedScript = parser.Parse();
+    auto parsedStatements = parsedScript -> GetStatements();
     REQUIRE(parsedStatements->size() == 1);
     auto firstStatement = parsedStatements -> at(0);
     REQUIRE(firstStatement -> GetKind() == ParsedStatementKind::Expression);

@@ -140,12 +175,18 @@ TEST_CASE( "Assert binary precedence", "[parser]" ) {
     REQUIRE(expression -> GetKind() == ParsedExpressionKind::Parenthesized);
     auto innerExpression = ((ParenthesizedExpression*)expression) -> GetInnerExpression();
     REQUIRE(innerExpression -> GetKind() == ParsedExpressionKind::LiteralInteger);
+
+    for (auto t : v){
+        delete t;
+    }
+    delete parsedScript;
 }
 
 TEST_CASE( "Parse String Tokens", "[parser]" ) {
     vector<const IToken*> v {new StringToken(u"foo bar", 0,0), new SimpleToken(TokenKind::EndOfFile,0,0)};
     Parser parser = Parser(v, nullptr);
-    auto parsedStatements = parser.Parse() -> GetStatements();
+    auto parsedScript = parser.Parse();
+    auto parsedStatements = parsedScript -> GetStatements();
     REQUIRE(parsedStatements->size() == 1);
     auto firstStatement = parsedStatements -> at(0);
     REQUIRE(firstStatement -> GetKind() == ParsedStatementKind::Expression);

@@ -153,6 +194,11 @@ TEST_CASE( "Parse String Tokens", "[parser]" ) {
     REQUIRE(expression -> GetKind() == ParsedExpressionKind::LiteralString);
     auto boolean = ((LiteralStringExpression*)expression);
     REQUIRE(boolean->GetValue() == u"foo bar");
+
+    for (auto t : v){
+        delete t;
+    }
+    delete parsedScript;
 }
 
 TEST_CASE( "Parse Global Assignment", "[parser]" ) {

@@ -163,7 +209,8 @@ TEST_CASE( "Parse Global Assignment", "[parser]" ) {
         new SimpleToken(TokenKind::EndOfFile,0,0)
     };
     Parser parser = Parser(v, nullptr);
-    auto parsedStatements = parser.Parse() -> GetStatements();
+    auto parsedScript = parser.Parse();
+    auto parsedStatements = parsedScript -> GetStatements();
     REQUIRE(parsedStatements->size() == 1);
     auto firstStatement = parsedStatements -> at(0);
     REQUIRE(firstStatement -> GetKind() == ParsedStatementKind::Assignment);

@@ -171,6 +218,11 @@ TEST_CASE( "Parse Global Assignment", "[parser]" ) {
     REQUIRE(!assignment -> IsLocal());
     REQUIRE(assignment->GetIdentifier().GetHash() == HashedString("foo").GetHash());
     REQUIRE(((LiteralBoolExpression*)assignment->GetExpression()) -> GetValue());
+
+    for (auto t : v){
+        delete t;
+    }
+    delete parsedScript;
 }
 
 TEST_CASE( "Parse local Assignment", "[parser]" ) {

@@ -182,7 +234,8 @@ TEST_CASE( "Parse local Assignment", "[parser]" ) {
         new SimpleToken(TokenKind::EndOfFile,0,0)
     };
     Parser parser = Parser(v, nullptr);
-    auto parsedStatements = parser.Parse() -> GetStatements();
+    auto parsedScript = parser.Parse();
+    auto parsedStatements = parsedScript -> GetStatements();
     REQUIRE(parsedStatements->size() == 1);
     auto firstStatement = parsedStatements -> at(0);
     REQUIRE(firstStatement -> GetKind() == ParsedStatementKind::Assignment);

@@ -190,6 +243,12 @@ TEST_CASE( "Parse local Assignment", "[parser]" ) {
     REQUIRE(assignment -> IsLocal());
     REQUIRE(assignment->GetIdentifier().GetHash() == HashedString("foo").GetHash());
     REQUIRE(((LiteralBoolExpression*)assignment->GetExpression()) -> GetValue());
+
+    for (auto t : v){
+        delete t;
+    }
+    delete parsedScript;
+
 }
 
 TEST_CASE( "Parse function declaration", "[parser]" ){

@@ -210,7 +269,8 @@ TEST_CASE( "Parse function declaration", "[parser]" ){
         new SimpleToken(TokenKind::EndOfFile,0,0),
     };
     Parser parser = Parser(v, nullptr);
-    auto parsedStatements = parser.Parse() -> GetStatements();
+    auto parsedScript = parser.Parse();
+    auto parsedStatements = parsedScript -> GetStatements();
     REQUIRE(parsedStatements->size() == 1);
     auto firstStatement = parsedStatements -> at(0);
     REQUIRE(firstStatement -> GetKind() == ParsedStatementKind::FunctionDeclaration);

@@ -221,6 +281,11 @@ TEST_CASE( "Parse function declaration", "[parser]" ){
     CHECK(parameters -> at(0) ->GetIdentifier() == HashedString("bar"));
     CHECK(parameters -> at(1) ->GetType() == HashedString("number"));
     CHECK(parameters -> at(1) ->GetIdentifier() == HashedString("par"));
+
+    for (auto t : v){
+        delete t;
+    }
+    delete parsedScript;
 }
 
 