From 8285811fb731fea0fb7b5925ca72892260881755 Mon Sep 17 00:00:00 2001
From: Deukhoofd
Date: Sun, 19 May 2019 12:49:26 +0200
Subject: [PATCH] Add a couple more characters to lex

---
 src/Parser/Lexer.cpp     | 67 +++++++++++++++++++++++++++++++++++++---
 src/Parser/Token.hpp     | 13 ++++++++
 src/Parser/TokenKind.hpp |  8 +++++
 3 files changed, 83 insertions(+), 5 deletions(-)

diff --git a/src/Parser/Lexer.cpp b/src/Parser/Lexer.cpp
index c6c47fb..2f2e654 100644
--- a/src/Parser/Lexer.cpp
+++ b/src/Parser/Lexer.cpp
@@ -35,11 +35,22 @@ IToken* Lexer::LexNext(char c){
     switch (c) {
         case '\0':
             return new SimpleToken(TokenKind::EndOfFile);
-        case ' ':
-        case '\t':
-        case '\n':
-        case '\r':
+        case ' ': case '\t': case '\n': case '\r':
             return new SimpleToken(TokenKind::WhiteSpace);
+        case '+':
+            return new SimpleToken(TokenKind::PlusToken);
+        case '-':
+            return new SimpleToken(TokenKind::MinusToken);
+        case '/':
+            return new SimpleToken(TokenKind::SlashToken);
+        case '*':
+            return new SimpleToken(TokenKind::StarToken);
+        case '=':
+            if (Lexer::Peek() == '='){
+                Lexer::Next();
+                return new SimpleToken(TokenKind::EqualityToken);
+            }
+            return new SimpleToken(TokenKind::AssignmentToken);
         case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9':
             return LexNumber(c);
         default:
@@ -101,7 +112,7 @@ IToken* Lexer::LexNumber(char c){
         }
     }
     if (has_point){
-        throw;
+        return new FloatToken(float_value);
     }
     else{
         return new IntegerToken(int_value);
@@ -139,6 +150,39 @@ TEST_CASE( "Lex Null Terminator as EOF", "[lexer]" ) {
     Lexer lexer = Lexer("");
     REQUIRE(lexer.LexNext('\0') -> GetKind() == TokenKind::EndOfFile);
 }
 
+TEST_CASE( "Lex Plus Token", "[lexer]" ) {
+    Lexer lexer = Lexer("");
+    REQUIRE(lexer.LexNext('+') -> GetKind() == TokenKind::PlusToken);
+}
+
+TEST_CASE( "Lex Minus Token", "[lexer]" ) {
+    Lexer lexer = Lexer("");
+    REQUIRE(lexer.LexNext('-') -> GetKind() == TokenKind::MinusToken);
+}
+
+TEST_CASE( "Lex Slash Token", "[lexer]" ) {
+    Lexer lexer = Lexer("");
+    REQUIRE(lexer.LexNext('/') -> GetKind() == TokenKind::SlashToken);
+}
+
+TEST_CASE( "Lex Star Token", "[lexer]" ) {
+    Lexer lexer = Lexer("");
+    REQUIRE(lexer.LexNext('*') -> GetKind() == TokenKind::StarToken);
+}
+
+TEST_CASE( "Lex Assignment Token", "[lexer]" ) {
+    Lexer lexer = Lexer("");
+    REQUIRE(lexer.LexNext('=') -> GetKind() == TokenKind::AssignmentToken);
+}
+
+TEST_CASE( "Lex Equality Token", "[lexer]" ) {
+    Lexer lexer = Lexer("==");
+    auto tokens = lexer.Lex();
+    REQUIRE(tokens.size() == 2);
+    IToken* firstToken = tokens[0];
+    REQUIRE(firstToken -> GetKind() == TokenKind::EqualityToken);
+}
+
 TEST_CASE( "Lex Whitespace", "[lexer]" ) {
     Lexer lexer = Lexer("");
     CHECK(lexer.LexNext(' ') -> GetKind() == TokenKind::WhiteSpace);
@@ -174,4 +218,17 @@ TEST_CASE( "Lex Longer Integers", "[lexer]" ) {
     }
 }
 
+TEST_CASE( "Lex Floats", "[lexer]" ) {
+    double floats[] {0.5, 0.8, 100.7, 52.3548, 8461354.1324886};
+    for (double f : floats){
+        Lexer lexer = Lexer(std::to_string(f));
+        auto tokens = lexer.Lex();
+        REQUIRE(tokens.size() == 2);
+        IToken* firstToken = tokens[0];
+        REQUIRE(firstToken -> GetKind() == TokenKind::Float);
+        auto* floatToken = (FloatToken *)firstToken;
+        CHECK(floatToken -> Value == Approx(f));
+    }
+}
+
 #endif
\ No newline at end of file
diff --git a/src/Parser/Token.hpp b/src/Parser/Token.hpp
index 445aec5..74787ad 100644
--- a/src/Parser/Token.hpp
+++ b/src/Parser/Token.hpp
@@ -34,4 +34,17 @@ public:
     }
 };
 
+class FloatToken : public IToken{
+public:
+    double Value;
+
+    explicit FloatToken(double value){
+        Value = value;
+    }
+
+    TokenKind GetKind() override{
+        return TokenKind::Float;
+    }
+};
+
 #endif //PORYGONLANG_TOKEN_HPP
diff --git a/src/Parser/TokenKind.hpp b/src/Parser/TokenKind.hpp
index ebbf145..e184f0b 100644
--- a/src/Parser/TokenKind.hpp
+++ b/src/Parser/TokenKind.hpp
@@ -5,7 +5,15 @@ enum TokenKind{
     EndOfFile,
     WhiteSpace,
+    PlusToken,
+    MinusToken,
+    SlashToken,
+    StarToken,
+    AssignmentToken,
+    EqualityToken,
+
     Integer,
+    Float,
 };
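
Note (illustrative, not part of the patch): the '=' case in LexNext relies on one character of
lookahead, peeking at the next character and only consuming it when it completes the longer
operator "==". The sketch below shows that peek-then-consume idea in a self-contained form; the
names EqKind and LexEquals are hypothetical stand-ins and do not use this repository's
Lexer/Peek/Next types, so it compiles on its own.

    // Minimal sketch of one-character lookahead for '=' vs "==".
    #include <cassert>
    #include <cstddef>
    #include <string>

    enum class EqKind { Assignment, Equality };

    // 'pos' points at a '='; advance it past the token that starts there.
    static EqKind LexEquals(const std::string& source, std::size_t& pos) {
        if (pos + 1 < source.size() && source[pos + 1] == '=') {
            pos += 2;            // consume both characters of "=="
            return EqKind::Equality;
        }
        pos += 1;                // consume the single '='
        return EqKind::Assignment;
    }

    int main() {
        std::size_t pos = 0;
        assert(LexEquals("==", pos) == EqKind::Equality && pos == 2);
        pos = 0;
        assert(LexEquals("= 5", pos) == EqKind::Assignment && pos == 1);
        return 0;
    }

The same peek-then-consume step would extend to other two-character operators (for example "!="
or ">=") without extra lexer state, since only one character of lookahead is ever needed.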