Speed up lexer by allocating more space for its tokens initially.

This commit is contained in:
Deukhoofd 2019-09-20 18:51:31 +02:00
parent 0763ebbad0
commit 0d59a1d029
Signed by: Deukhoofd
GPG Key ID: ADF2E9256009EDCE
1 changed file with 8 additions and 2 deletions

View File

@@ -14,17 +14,23 @@ namespace Porygon::Parser {
/// Tokenizes the whole script and returns the significant tokens.
///
/// Whitespace and comment tokens are discarded (and freed) during the
/// scan; everything else — including the final EndOfFile token — is
/// returned. Ownership of the returned raw Token pointers passes to
/// the caller.
vector<const Token *> Lexer::Lex() {
    vector<const Token *> tokens;
    // Pre-allocate for the theoretical maximum number of tokens so the
    // hot lexing loop never reallocates: every token consumes at least
    // one input character, plus one trailing EndOfFile token, so at
    // most _scriptSize + 1 entries are ever stored.
    //
    // reserve() gives the same single up-front allocation as
    // constructing a vector of _scriptSize + 1 elements, but without
    // value-initializing every slot to nullptr, without the manual
    // write index, and without the trailing resize().
    tokens.reserve(this->_scriptSize + 1);
    while (true) {
        Token *next = this->LexNext(this->Next());
        auto nextKind = next->GetKind();
        if (nextKind != TokenKind::WhiteSpace && nextKind != TokenKind::Comment) {
            tokens.push_back(next);
        } else {
            // Insignificant token: free it immediately so nothing leaks.
            // NOTE(review): raw new/delete ownership here — presumably a
            // candidate for unique_ptr in a follow-up; verify callers first.
            delete next;
        }
        if (nextKind == TokenKind::EndOfFile)
            break;
    }
    return tokens;
}