Speed up lexer by allocating more space for its tokens initially.
parent 0763ebbad0
commit 0d59a1d029
@@ -14,17 +14,23 @@ namespace Porygon::Parser {
 
     vector<const Token *> Lexer::Lex() {
-        vector<const Token *> tokens;
+        // This is a very greedy allocation. Look into potentially making this less greedy.
+        vector<const Token *> tokens(this->_scriptSize+1);
+        size_t current = 0;
         while (true) {
             Token *next = this->LexNext(this->Next());
             auto nextKind = next->GetKind();
             if (nextKind != TokenKind::WhiteSpace && nextKind != TokenKind::Comment)
-                tokens.push_back(next);
+            {
+                tokens[current] = next;
+                current++;
+            }
             else
                 delete next;
             if (nextKind == TokenKind::EndOfFile)
                 break;
         }
+        tokens.resize(current);
         return tokens;
     }
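The TODO added in this hunk flags the allocation as very greedy: it sizes the vector at one token slot per character of the script plus one, although most tokens span several characters, and then trims the excess with resize(). One possible less greedy variant, sketched below purely as an illustration and not part of this commit, keeps push_back but reserves a smaller capacity estimate up front; the _scriptSize / 2 heuristic is an assumed ratio, not a measured one, and only identifiers already visible in the diff (LexNext, Next, GetKind, TokenKind, _scriptSize) are reused.

    // Hypothetical alternative, not in this commit: reserve a smaller estimate
    // and keep push_back, so the trailing resize() is no longer needed.
    vector<const Token *> Lexer::Lex() {
        vector<const Token *> tokens;
        // Assumed heuristic: roughly one token per two characters of input.
        tokens.reserve(this->_scriptSize / 2 + 1);
        while (true) {
            Token *next = this->LexNext(this->Next());
            auto nextKind = next->GetKind();
            if (nextKind != TokenKind::WhiteSpace && nextKind != TokenKind::Comment)
                tokens.push_back(next);
            else
                delete next;
            if (nextKind == TokenKind::EndOfFile)
                break;
        }
        return tokens;
    }

The trade-off is that reserve() avoids the oversized buffer and the final resize(), while push_back still grows the vector geometrically if the estimate turns out to be too small, so the worst case is a few reallocations rather than an over-allocation proportional to the script length.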