From 373794a830b5a3052088ba6e7b7fc7c67eb4e5c3 Mon Sep 17 00:00:00 2001 From: Deukhoofd Date: Wed, 6 Apr 2022 22:39:25 +0200 Subject: [PATCH] Adds func parsing, integration tests, some general fixes related to integration tests --- Cargo.toml | 10 +- src/integration_tests/add_function.rs | 353 ++++++++++++++ src/integration_tests/build.rs | 108 +++++ src/integration_tests/enum_definition.rs | 439 ++++++++++++++++++ src/integration_tests/mod.rs | 2 + .../test_cases/add_function/lex_tokens.json | 229 +++++++++ .../test_cases/add_function/parsed_tree.json | 96 ++++ .../test_cases/add_function/script.ses | 3 + .../enum_definition/lex_tokens.json | 366 +++++++++++++++ .../enum_definition/parsed_tree.json | 41 ++ .../test_cases/enum_definition/script.ses | 7 + src/lib.rs | 3 + src/modifiers.rs | 8 +- src/parsing/lexer/lex_tokens.rs | 5 + src/parsing/lexer/mod.rs | 7 +- src/parsing/parser/mod.rs | 175 +++++-- src/parsing/parser/parsed_funcattr.rs | 3 + src/parsing/parser/parsed_statement.rs | 34 +- src/parsing/parser/parsed_type_modifier.rs | 4 + src/parsing/parser/parser_operators.rs | 7 + src/parsing/parser/parser_tests.rs | 6 +- src/prim_type.rs | 3 + src/span.rs | 6 +- 23 files changed, 1873 insertions(+), 42 deletions(-) create mode 100644 src/integration_tests/add_function.rs create mode 100644 src/integration_tests/build.rs create mode 100644 src/integration_tests/enum_definition.rs create mode 100644 src/integration_tests/mod.rs create mode 100644 src/integration_tests/test_cases/add_function/lex_tokens.json create mode 100644 src/integration_tests/test_cases/add_function/parsed_tree.json create mode 100644 src/integration_tests/test_cases/add_function/script.ses create mode 100644 src/integration_tests/test_cases/enum_definition/lex_tokens.json create mode 100644 src/integration_tests/test_cases/enum_definition/parsed_tree.json create mode 100644 src/integration_tests/test_cases/enum_definition/script.ses diff --git a/Cargo.toml b/Cargo.toml index c8841e7..28f1f18 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,12 +3,20 @@ name = "seraph_script" version = "0.1.0" authors = ["Deukhoofd "] edition = "2021" +build = "src/integration_tests/build.rs" [dependencies] itertools = "0.10.0" -enumflags2 = "0.7.1" +enumflags2 = { version = "0.7.1", features = ["serde"] } backtrace = "0.3.63" +[dev-dependencies] +glob = "0.3.0" +serde = "1.0.136" +serde_derive = "1.0.136" +serde_json = "1.0.79" +pretty_assertions = "1.2.1" + [profile.release] lto = true codegen-units = 1 diff --git a/src/integration_tests/add_function.rs b/src/integration_tests/add_function.rs new file mode 100644 index 0000000..ffb819d --- /dev/null +++ b/src/integration_tests/add_function.rs @@ -0,0 +1,353 @@ +//////////////////////////// +// Automatically Generated// +//////////////////////////// +use crate::logger::messages::Message; +use crate::parsing::lexer::lex; +use crate::parsing::lexer::lex_tokens::LexToken; +use crate::parsing::parser::parse; +use crate::parsing::parser::parsed_statement::ParsedStatement; +use crate::span::Span; + +fn panic_on_error(msg: Message, _: Span) { + std::panic::panic_any(msg.stringify()); +} +#[test] +fn integration_add_function() { + let script = "int add(int a, int b) { + return a + b; +}"; + let lexed_tokens = lex(script, &mut panic_on_error); + println!("{}", serde_json::to_string(&lexed_tokens).unwrap()); + let expected_tokens: Vec = + serde_json::from_str(r#"[ + { + "token_type": "IntKeyword", + "span": { + "start": 0, + "end": 3 + } + }, + { + "token_type": "WhiteSpace", + "span": { + 
"start": 3, + "end": 4 + } + }, + { + "token_type": { + "Identifier": "add" + }, + "span": { + "start": 4, + "end": 7 + } + }, + { + "token_type": "OpenBracket", + "span": { + "start": 7, + "end": 8 + } + }, + { + "token_type": "IntKeyword", + "span": { + "start": 8, + "end": 11 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 11, + "end": 12 + } + }, + { + "token_type": { + "Identifier": "a" + }, + "span": { + "start": 12, + "end": 13 + } + }, + { + "token_type": "Comma", + "span": { + "start": 13, + "end": 14 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 14, + "end": 15 + } + }, + { + "token_type": "IntKeyword", + "span": { + "start": 15, + "end": 18 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 18, + "end": 19 + } + }, + { + "token_type": { + "Identifier": "b" + }, + "span": { + "start": 19, + "end": 20 + } + }, + { + "token_type": "CloseBracket", + "span": { + "start": 20, + "end": 21 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 21, + "end": 22 + } + }, + { + "token_type": "OpenCurlyBracket", + "span": { + "start": 22, + "end": 23 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 23, + "end": 24 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 24, + "end": 25 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 25, + "end": 26 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 26, + "end": 27 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 27, + "end": 28 + } + }, + { + "token_type": "ReturnKeyword", + "span": { + "start": 28, + "end": 34 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 34, + "end": 35 + } + }, + { + "token_type": { + "Identifier": "a" + }, + "span": { + "start": 35, + "end": 36 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 36, + "end": 37 + } + }, + { + "token_type": "Plus", + "span": { + "start": 37, + "end": 38 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 38, + "end": 39 + } + }, + { + "token_type": { + "Identifier": "b" + }, + "span": { + "start": 39, + "end": 40 + } + }, + { + "token_type": "Semicolon", + "span": { + "start": 40, + "end": 41 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 41, + "end": 42 + } + }, + { + "token_type": "CloseCurlyBracket", + "span": { + "start": 42, + "end": 43 + } + }, + { + "token_type": "EndOfFile", + "span": { + "start": 43, + "end": 43 + } + } +]"#).unwrap(); + assert_eq!(lexed_tokens, expected_tokens); + + let parsed_tree = parse(lexed_tokens, &mut panic_on_error); + println!("{}", serde_json::to_string(&parsed_tree).unwrap()); + let expected_tree: Box = + serde_json::from_str(r#"{ + "Script": { + "statements": [ + { + "FuncDeclaration": { + "modifiers": 0, + "field_mod": null, + "is_destructor": false, + "returns_reference": false, + "return_type": { + "Type": { + "is_const": false, + "scope": null, + "datatype": { + "DataTypePrimType": { + "prim_type": "Int32" + } + }, + "modifiers": [] + } + }, + "name": "add", + "param_list": { + "ParamList": { + "parameters": [ + { + "parameter_type": { + "Type": { + "is_const": false, + "scope": null, + "datatype": { + "DataTypePrimType": { + "prim_type": "Int32" + } + }, + "modifiers": [] + } + }, + "type_mod": null, + "identifier": "a", + "default": null + }, + { + "parameter_type": { + "Type": { + "is_const": false, + "scope": null, + "datatype": { + "DataTypePrimType": { + "prim_type": "Int32" + } + }, + "modifiers": [] + } + }, + "type_mod": null, + 
"identifier": "b", + "default": null + } + ] + } + }, + "is_const": false, + "func_attr": 0, + "block": { + "StatBlock": { + "statements": [ + { + "ReturnStatement": { + "expression": { + "BinaryExpr": { + "left": { + "VarAccess": { + "scope": null, + "identifier": "a" + } + }, + "operator": "Addition", + "right": { + "VarAccess": { + "scope": null, + "identifier": "b" + } + } + } + } + } + } + ] + } + } + } + } + ] + } +}"#).unwrap(); + assert_eq!(parsed_tree, expected_tree); +} \ No newline at end of file diff --git a/src/integration_tests/build.rs b/src/integration_tests/build.rs new file mode 100644 index 0000000..033f7cd --- /dev/null +++ b/src/integration_tests/build.rs @@ -0,0 +1,108 @@ +use std::fs; +use std::fs::File; +use std::io::Write; +use std::path::Path; + +fn main() { + let paths = fs::read_dir("src/integration_tests/test_cases/").unwrap(); + let mod_file_path = Path::new("src/integration_tests/mod.rs"); + let mut mod_file = File::create(mod_file_path).unwrap(); + + for path_opt in paths { + if let Err(..) = path_opt { + continue; + } + let path = path_opt.unwrap(); + if !path.file_type().unwrap().is_dir() { + continue; + } + + let p = path.path(); + let mut script_path = p.clone(); + script_path.push("script.ses"); + + if !script_path.exists() { + continue; + } + + let script = fs::read_to_string(script_path).unwrap(); + let test_name = p.file_stem().unwrap().to_str().unwrap(); + + writeln!(mod_file, "mod {name};", name = test_name).unwrap(); + + let testfile_path = format!("src/integration_tests/{}.rs", test_name); + let mut testfile = File::create(Path::new(&testfile_path)).unwrap(); + + write!( + testfile, + r#"//////////////////////////// +// Automatically Generated// +//////////////////////////// +use crate::logger::messages::Message; +use crate::parsing::lexer::lex; +use crate::parsing::lexer::lex_tokens::LexToken; +use crate::parsing::parser::parse; +use crate::parsing::parser::parsed_statement::ParsedStatement; +use crate::span::Span; + +fn panic_on_error(msg: Message, _: Span) {{ + std::panic::panic_any(msg.stringify()); +}}"# + ) + .unwrap(); + write!( + testfile, + r#" +#[test] +fn integration_{name}() {{ + let script = "{script}"; + let lexed_tokens = lex(script, &mut panic_on_error); + println!("{{}}", serde_json::to_string(&lexed_tokens).unwrap()); +"#, + name = test_name, + script = script.as_str() + ) + .unwrap(); + + let mut tokens_file_path = p.clone(); + tokens_file_path.push("lex_tokens.json"); + if tokens_file_path.exists() { + let tokens_json = fs::read_to_string(tokens_file_path).unwrap(); + write!( + testfile, + r##" let expected_tokens: Vec = + serde_json::from_str(r#"{tokens}"#).unwrap(); + assert_eq!(lexed_tokens, expected_tokens); +"##, + tokens = tokens_json.as_str() + ) + .unwrap(); + } + + write!( + testfile, + r##" + let parsed_tree = parse(lexed_tokens, &mut panic_on_error); + println!("{{}}", serde_json::to_string(&parsed_tree).unwrap()); +"## + ) + .unwrap(); + + let mut parsed_tree_path = p.clone(); + parsed_tree_path.push("parsed_tree.json"); + if parsed_tree_path.exists() { + let parsed_tree_json = fs::read_to_string(parsed_tree_path).unwrap(); + write!( + testfile, + r##" let expected_tree: Box = + serde_json::from_str(r#"{expected_tree}"#).unwrap(); + assert_eq!(parsed_tree, expected_tree); +"##, + expected_tree = parsed_tree_json.as_str() + ) + .unwrap(); + } + + write!(testfile, "}}").unwrap(); + } +} diff --git a/src/integration_tests/enum_definition.rs b/src/integration_tests/enum_definition.rs new file mode 100644 index 
0000000..e1e61fe --- /dev/null +++ b/src/integration_tests/enum_definition.rs @@ -0,0 +1,439 @@ +//////////////////////////// +// Automatically Generated// +//////////////////////////// +use crate::logger::messages::Message; +use crate::parsing::lexer::lex; +use crate::parsing::lexer::lex_tokens::LexToken; +use crate::parsing::parser::parse; +use crate::parsing::parser::parsed_statement::ParsedStatement; +use crate::span::Span; + +fn panic_on_error(msg: Message, _: Span) { + std::panic::panic_any(msg.stringify()); +} +#[test] +fn integration_enum_definition() { + let script = "enum TestEnum : uint8 { + a, + b, + c, + d = 128, + e +}"; + let lexed_tokens = lex(script, &mut panic_on_error); + println!("{}", serde_json::to_string(&lexed_tokens).unwrap()); + let expected_tokens: Vec = + serde_json::from_str(r#"[ + { + "token_type": "EnumKeyword", + "span": { + "start": 0, + "end": 4 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 4, + "end": 5 + } + }, + { + "token_type": { + "Identifier": "TestEnum" + }, + "span": { + "start": 5, + "end": 13 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 13, + "end": 14 + } + }, + { + "token_type": "Colon", + "span": { + "start": 14, + "end": 15 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 15, + "end": 16 + } + }, + { + "token_type": "Uint8Keyword", + "span": { + "start": 16, + "end": 21 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 21, + "end": 22 + } + }, + { + "token_type": "OpenCurlyBracket", + "span": { + "start": 22, + "end": 23 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 23, + "end": 24 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 24, + "end": 25 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 25, + "end": 26 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 26, + "end": 27 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 27, + "end": 28 + } + }, + { + "token_type": { + "Identifier": "a" + }, + "span": { + "start": 28, + "end": 29 + } + }, + { + "token_type": "Comma", + "span": { + "start": 29, + "end": 30 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 30, + "end": 31 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 31, + "end": 32 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 32, + "end": 33 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 33, + "end": 34 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 34, + "end": 35 + } + }, + { + "token_type": { + "Identifier": "b" + }, + "span": { + "start": 35, + "end": 36 + } + }, + { + "token_type": "Comma", + "span": { + "start": 36, + "end": 37 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 37, + "end": 38 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 38, + "end": 39 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 39, + "end": 40 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 40, + "end": 41 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 41, + "end": 42 + } + }, + { + "token_type": { + "Identifier": "c" + }, + "span": { + "start": 42, + "end": 43 + } + }, + { + "token_type": "Comma", + "span": { + "start": 43, + "end": 44 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 44, + "end": 45 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 45, + "end": 46 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 46, + 
"end": 47 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 47, + "end": 48 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 48, + "end": 49 + } + }, + { + "token_type": { + "Identifier": "d" + }, + "span": { + "start": 49, + "end": 50 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 50, + "end": 51 + } + }, + { + "token_type": "Equals", + "span": { + "start": 51, + "end": 52 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 52, + "end": 53 + } + }, + { + "token_type": { + "IntegerLiteral": 128 + }, + "span": { + "start": 53, + "end": 56 + } + }, + { + "token_type": "Comma", + "span": { + "start": 56, + "end": 57 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 57, + "end": 58 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 58, + "end": 59 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 59, + "end": 60 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 60, + "end": 61 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 61, + "end": 62 + } + }, + { + "token_type": { + "Identifier": "e" + }, + "span": { + "start": 62, + "end": 63 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 63, + "end": 64 + } + }, + { + "token_type": "CloseCurlyBracket", + "span": { + "start": 64, + "end": 65 + } + }, + { + "token_type": "EndOfFile", + "span": { + "start": 65, + "end": 65 + } + } +]"#).unwrap(); + assert_eq!(lexed_tokens, expected_tokens); + + let parsed_tree = parse(lexed_tokens, &mut panic_on_error); + println!("{}", serde_json::to_string(&parsed_tree).unwrap()); + let expected_tree: Box = + serde_json::from_str(r#"{ + "Script": { + "statements": [ + { + "EnumDeclaration": { + "modifiers": 0, + "identifier": "TestEnum", + "base_type": { + "DataTypePrimType": { + "prim_type": "UInt8" + } + }, + "values": [ + [ + "a", + null + ], + [ + "b", + null + ], + [ + "c", + null + ], + [ + "d", + { + "IntegerLiteral": 128 + } + ], + [ + "e", + null + ] + ] + } + } + ] + } +}"#).unwrap(); + assert_eq!(parsed_tree, expected_tree); +} \ No newline at end of file diff --git a/src/integration_tests/mod.rs b/src/integration_tests/mod.rs new file mode 100644 index 0000000..486bf46 --- /dev/null +++ b/src/integration_tests/mod.rs @@ -0,0 +1,2 @@ +mod enum_definition; +mod add_function; diff --git a/src/integration_tests/test_cases/add_function/lex_tokens.json b/src/integration_tests/test_cases/add_function/lex_tokens.json new file mode 100644 index 0000000..39ace15 --- /dev/null +++ b/src/integration_tests/test_cases/add_function/lex_tokens.json @@ -0,0 +1,229 @@ +[ + { + "token_type": "IntKeyword", + "span": { + "start": 0, + "end": 3 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 3, + "end": 4 + } + }, + { + "token_type": { + "Identifier": "add" + }, + "span": { + "start": 4, + "end": 7 + } + }, + { + "token_type": "OpenBracket", + "span": { + "start": 7, + "end": 8 + } + }, + { + "token_type": "IntKeyword", + "span": { + "start": 8, + "end": 11 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 11, + "end": 12 + } + }, + { + "token_type": { + "Identifier": "a" + }, + "span": { + "start": 12, + "end": 13 + } + }, + { + "token_type": "Comma", + "span": { + "start": 13, + "end": 14 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 14, + "end": 15 + } + }, + { + "token_type": "IntKeyword", + "span": { + "start": 15, + "end": 18 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 18, + "end": 19 + 
} + }, + { + "token_type": { + "Identifier": "b" + }, + "span": { + "start": 19, + "end": 20 + } + }, + { + "token_type": "CloseBracket", + "span": { + "start": 20, + "end": 21 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 21, + "end": 22 + } + }, + { + "token_type": "OpenCurlyBracket", + "span": { + "start": 22, + "end": 23 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 23, + "end": 24 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 24, + "end": 25 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 25, + "end": 26 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 26, + "end": 27 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 27, + "end": 28 + } + }, + { + "token_type": "ReturnKeyword", + "span": { + "start": 28, + "end": 34 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 34, + "end": 35 + } + }, + { + "token_type": { + "Identifier": "a" + }, + "span": { + "start": 35, + "end": 36 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 36, + "end": 37 + } + }, + { + "token_type": "Plus", + "span": { + "start": 37, + "end": 38 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 38, + "end": 39 + } + }, + { + "token_type": { + "Identifier": "b" + }, + "span": { + "start": 39, + "end": 40 + } + }, + { + "token_type": "Semicolon", + "span": { + "start": 40, + "end": 41 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 41, + "end": 42 + } + }, + { + "token_type": "CloseCurlyBracket", + "span": { + "start": 42, + "end": 43 + } + }, + { + "token_type": "EndOfFile", + "span": { + "start": 43, + "end": 43 + } + } +] \ No newline at end of file diff --git a/src/integration_tests/test_cases/add_function/parsed_tree.json b/src/integration_tests/test_cases/add_function/parsed_tree.json new file mode 100644 index 0000000..d673b37 --- /dev/null +++ b/src/integration_tests/test_cases/add_function/parsed_tree.json @@ -0,0 +1,96 @@ +{ + "Script": { + "statements": [ + { + "FuncDeclaration": { + "modifiers": 0, + "field_mod": null, + "is_destructor": false, + "returns_reference": false, + "return_type": { + "Type": { + "is_const": false, + "scope": null, + "datatype": { + "DataTypePrimType": { + "prim_type": "Int32" + } + }, + "modifiers": [] + } + }, + "name": "add", + "param_list": { + "ParamList": { + "parameters": [ + { + "parameter_type": { + "Type": { + "is_const": false, + "scope": null, + "datatype": { + "DataTypePrimType": { + "prim_type": "Int32" + } + }, + "modifiers": [] + } + }, + "type_mod": null, + "identifier": "a", + "default": null + }, + { + "parameter_type": { + "Type": { + "is_const": false, + "scope": null, + "datatype": { + "DataTypePrimType": { + "prim_type": "Int32" + } + }, + "modifiers": [] + } + }, + "type_mod": null, + "identifier": "b", + "default": null + } + ] + } + }, + "is_const": false, + "func_attr": 0, + "block": { + "StatBlock": { + "statements": [ + { + "ReturnStatement": { + "expression": { + "BinaryExpr": { + "left": { + "VarAccess": { + "scope": null, + "identifier": "a" + } + }, + "operator": "Addition", + "right": { + "VarAccess": { + "scope": null, + "identifier": "b" + } + } + } + } + } + } + ] + } + } + } + } + ] + } +} \ No newline at end of file diff --git a/src/integration_tests/test_cases/add_function/script.ses b/src/integration_tests/test_cases/add_function/script.ses new file mode 100644 index 0000000..082fc1e --- /dev/null +++ b/src/integration_tests/test_cases/add_function/script.ses @@ -0,0 
+1,3 @@ +int add(int a, int b) { + return a + b; +} \ No newline at end of file diff --git a/src/integration_tests/test_cases/enum_definition/lex_tokens.json b/src/integration_tests/test_cases/enum_definition/lex_tokens.json new file mode 100644 index 0000000..c0ad342 --- /dev/null +++ b/src/integration_tests/test_cases/enum_definition/lex_tokens.json @@ -0,0 +1,366 @@ +[ + { + "token_type": "EnumKeyword", + "span": { + "start": 0, + "end": 4 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 4, + "end": 5 + } + }, + { + "token_type": { + "Identifier": "TestEnum" + }, + "span": { + "start": 5, + "end": 13 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 13, + "end": 14 + } + }, + { + "token_type": "Colon", + "span": { + "start": 14, + "end": 15 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 15, + "end": 16 + } + }, + { + "token_type": "Uint8Keyword", + "span": { + "start": 16, + "end": 21 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 21, + "end": 22 + } + }, + { + "token_type": "OpenCurlyBracket", + "span": { + "start": 22, + "end": 23 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 23, + "end": 24 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 24, + "end": 25 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 25, + "end": 26 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 26, + "end": 27 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 27, + "end": 28 + } + }, + { + "token_type": { + "Identifier": "a" + }, + "span": { + "start": 28, + "end": 29 + } + }, + { + "token_type": "Comma", + "span": { + "start": 29, + "end": 30 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 30, + "end": 31 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 31, + "end": 32 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 32, + "end": 33 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 33, + "end": 34 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 34, + "end": 35 + } + }, + { + "token_type": { + "Identifier": "b" + }, + "span": { + "start": 35, + "end": 36 + } + }, + { + "token_type": "Comma", + "span": { + "start": 36, + "end": 37 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 37, + "end": 38 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 38, + "end": 39 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 39, + "end": 40 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 40, + "end": 41 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 41, + "end": 42 + } + }, + { + "token_type": { + "Identifier": "c" + }, + "span": { + "start": 42, + "end": 43 + } + }, + { + "token_type": "Comma", + "span": { + "start": 43, + "end": 44 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 44, + "end": 45 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 45, + "end": 46 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 46, + "end": 47 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 47, + "end": 48 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 48, + "end": 49 + } + }, + { + "token_type": { + "Identifier": "d" + }, + "span": { + "start": 49, + "end": 50 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 50, + "end": 51 + } + }, + { + "token_type": "Equals", + "span": { + "start": 51, + "end": 52 + } + }, + { + 
"token_type": "WhiteSpace", + "span": { + "start": 52, + "end": 53 + } + }, + { + "token_type": { + "IntegerLiteral": 128 + }, + "span": { + "start": 53, + "end": 56 + } + }, + { + "token_type": "Comma", + "span": { + "start": 56, + "end": 57 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 57, + "end": 58 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 58, + "end": 59 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 59, + "end": 60 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 60, + "end": 61 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 61, + "end": 62 + } + }, + { + "token_type": { + "Identifier": "e" + }, + "span": { + "start": 62, + "end": 63 + } + }, + { + "token_type": "WhiteSpace", + "span": { + "start": 63, + "end": 64 + } + }, + { + "token_type": "CloseCurlyBracket", + "span": { + "start": 64, + "end": 65 + } + }, + { + "token_type": "EndOfFile", + "span": { + "start": 65, + "end": 65 + } + } +] \ No newline at end of file diff --git a/src/integration_tests/test_cases/enum_definition/parsed_tree.json b/src/integration_tests/test_cases/enum_definition/parsed_tree.json new file mode 100644 index 0000000..f8f2d10 --- /dev/null +++ b/src/integration_tests/test_cases/enum_definition/parsed_tree.json @@ -0,0 +1,41 @@ +{ + "Script": { + "statements": [ + { + "EnumDeclaration": { + "modifiers": 0, + "identifier": "TestEnum", + "base_type": { + "DataTypePrimType": { + "prim_type": "UInt8" + } + }, + "values": [ + [ + "a", + null + ], + [ + "b", + null + ], + [ + "c", + null + ], + [ + "d", + { + "IntegerLiteral": 128 + } + ], + [ + "e", + null + ] + ] + } + } + ] + } +} \ No newline at end of file diff --git a/src/integration_tests/test_cases/enum_definition/script.ses b/src/integration_tests/test_cases/enum_definition/script.ses new file mode 100644 index 0000000..5bfa2ed --- /dev/null +++ b/src/integration_tests/test_cases/enum_definition/script.ses @@ -0,0 +1,7 @@ +enum TestEnum : uint8 { + a, + b, + c, + d = 128, + e +} \ No newline at end of file diff --git a/src/lib.rs b/src/lib.rs index a6fa792..08a09bd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,3 +13,6 @@ pub mod modifiers; pub mod parsing; pub(crate) mod prim_type; pub mod span; + +#[cfg(test)] +mod integration_tests; diff --git a/src/modifiers.rs b/src/modifiers.rs index bc79c96..170aee5 100644 --- a/src/modifiers.rs +++ b/src/modifiers.rs @@ -1,8 +1,11 @@ use enumflags2::bitflags; +#[cfg(test)] +use serde_derive::{Deserialize, Serialize}; #[bitflags] #[repr(u8)] -#[derive(Debug, Copy, Clone, PartialEq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[cfg_attr(test, derive(Serialize, Deserialize))] pub enum TypeModifier { External = 0x01, Shared = 0x02, @@ -12,7 +15,8 @@ pub enum TypeModifier { #[bitflags] #[repr(u8)] -#[derive(Debug, Copy, Clone, PartialEq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[cfg_attr(test, derive(Serialize, Deserialize))] pub enum FieldModifier { Private = 0x01, Protected = 0x02, diff --git a/src/parsing/lexer/lex_tokens.rs b/src/parsing/lexer/lex_tokens.rs index cba1f84..3788bc9 100644 --- a/src/parsing/lexer/lex_tokens.rs +++ b/src/parsing/lexer/lex_tokens.rs @@ -1,12 +1,17 @@ use crate::defines::{LiteralFloat, LiteralInt}; use crate::span::Span; +#[cfg(test)] +use serde_derive::{Deserialize, Serialize}; +#[derive(PartialEq, Debug)] +#[cfg_attr(test, derive(Serialize, Deserialize))] pub struct LexToken { pub token_type: TokenType, pub span: Span, } #[derive(PartialEq, Debug, Clone)] 
+#[cfg_attr(test, derive(Serialize, Deserialize))]
 pub enum TokenType {
     EndOfFile,
     WhiteSpace,
diff --git a/src/parsing/lexer/mod.rs b/src/parsing/lexer/mod.rs
index 27956ec..eed621a 100644
--- a/src/parsing/lexer/mod.rs
+++ b/src/parsing/lexer/mod.rs
@@ -34,6 +34,7 @@ fn lex_eq_or(
         chars.next();
         f(eq, start_pos, chars.real_position);
     } else {
+        chars.reset_peek();
         f(or, start_pos, chars.real_position);
     }
 }
@@ -58,10 +59,14 @@ fn lex_eq_rep_or(
                 chars.next();
                 f(eq, start_pos, chars.real_position);
             } else {
+                chars.reset_peek();
                 f(or, start_pos, chars.real_position);
             }
         }
-        None => f(or, start_pos, chars.real_position),
+        None => {
+            chars.reset_peek();
+            f(or, start_pos, chars.real_position)
+        }
     };
 }
 
diff --git a/src/parsing/parser/mod.rs b/src/parsing/parser/mod.rs
index 0deb6e9..7c3e8ee 100644
--- a/src/parsing/parser/mod.rs
+++ b/src/parsing/parser/mod.rs
@@ -9,8 +9,8 @@ use super::lexer::lex_tokens::{LexToken, TokenType};
 use crate::logger::messages::Message;
 use crate::logger::messages::Message::UnexpectedToken;
 use crate::modifiers::{FieldModifier, TypeModifier};
-use crate::parsing::lexer::lex_tokens::TokenType::CloseBracket;
 use crate::parsing::parser::parsed_funcattr::FuncAttr;
+use crate::parsing::parser::parsed_statement::ParsedParameter;
 use crate::parsing::parser::parsed_statement::ParsedStatement::{
     AnonymousCall, ExprPostOp, IndexingOperator,
 };
@@ -143,12 +143,22 @@ fn parse_script(
     reader: &mut ParseReader,
     log: &mut dyn FnMut(Message, Span),
 ) -> Box<ParsedStatement> {
+    // script ::= {import | enum | typedef | class | interface | funcdef | virtprop | var | func | namespace | ';'};
     let mut vec: Vec<ParsedStatement> = Vec::new();
     loop {
         let n = reader.peek();
         let token_type = n.token_type.clone();
         let span = n.span;
         match token_type {
+            TokenType::ImportKeyword => {
+                vec.push(*parse_import(reader, log).unwrap());
+            }
+            TokenType::TypeDefKeyword => {
+                vec.push(*parse_typedef(reader, log).unwrap());
+            }
+            TokenType::FuncDefKeyword => {
+                vec.push(*parse_funcdef(reader, log).unwrap());
+            }
             TokenType::NamespaceKeyword => {
                 vec.push(*parse_namespace(reader, log));
             }
@@ -156,10 +166,20 @@
             TokenType::EndOfFile => break,
             TokenType::CloseCurlyBracket => break,
             _ => {
-                if let Some(s) = parse_interface(reader, log) {
+                if let Some(s) = parse_enum(reader, log) {
+                    vec.push(*s);
+                }
+                // else if let Some(s) = parse_class(reader, log) {
+                //     vec.push(*s);
+                // }
+                else if let Some(s) = parse_interface(reader, log) {
+                    vec.push(*s);
+                } else if let Some(s) = parse_virtprop(reader, log) {
                     vec.push(*s);
                 } else if let Some(s) = parse_var(reader, log) {
                     vec.push(*s);
+                } else if let Some(s) = parse_func(reader, log) {
+                    vec.push(*s);
                 } else {
                     log(
                         UnexpectedToken {
@@ -494,12 +514,16 @@ fn parse_enum(
             // FIXME: Add error log if value is None
         }
         values.push((identifier.unwrap(), value));
+        if reader.peek().token_type != TokenType::Comma {
+            break;
+        }
+        reader.next();
     }
     reader.consume(TokenType::CloseCurlyBracket, log);
 }
 
 Some(Box::new(ParsedStatement::EnumDeclaration {
-    modifiers: modifiers,
+    modifiers,
     identifier: name.unwrap(),
     base_type,
     values,
@@ -524,11 +548,85 @@ fn parse_funcdef(
 }
 
 fn parse_func(
-    _outer_reader: &mut ParseReader,
-    _log: &mut dyn FnMut(Message, Span),
+    outer_reader: &mut ParseReader,
+    log: &mut dyn FnMut(Message, Span),
 ) -> Option<Box<ParsedStatement>> {
     // func ::= {'shared' | 'external'} ['private' | 'protected'] [((type ['&']) | '~')] identifier paramlist ['const'] funcattr (';' | statblock);
-    unimplemented!();
+    let mut reader = outer_reader.create_inner();
+    let mut modifiers: BitFlags<TypeModifier> = BitFlags::empty();
+    loop {
+        match reader.peek().token_type {
+            TokenType::SharedKeyword => {
+                reader.next();
+                modifiers |= TypeModifier::Shared;
+            }
+            TokenType::ExternalKeyword => {
+                reader.next();
+                modifiers |= TypeModifier::External;
+            }
+            _ => {
+                break;
+            }
+        }
+    }
+    let mut field_mod: Option<FieldModifier> = None;
+    match reader.peek().token_type {
+        TokenType::PrivateKeyword => {
+            field_mod = Some(FieldModifier::Private);
+            reader.next();
+        }
+        TokenType::ProtectedKeyword => {
+            field_mod = Some(FieldModifier::Protected);
+            reader.next();
+        }
+        _ => {}
+    }
+
+    // [((type ['&']) | '~')]
+    let mut is_destructor = false;
+    let mut returns_reference = false;
+    let mut return_type = None;
+
+    if reader.peek().token_type == TokenType::Tilde {
+        is_destructor = true;
+        reader.next();
+    } else {
+        return_type = parse_type(&mut reader, log);
+        if return_type.is_some() && reader.peek().token_type == TokenType::Ampersand {
+            returns_reference = true;
+            reader.next();
+        }
+    }
+    let name = parse_identifier(&mut reader, log, true);
+    name.as_ref()?;
+    let param_list = parse_paramlist(&mut reader, log);
+    param_list.as_ref()?;
+    let is_const = reader.peek().token_type == TokenType::ConstKeyword;
+    if is_const {
+        reader.next();
+    }
+    let func_attr = parse_funcattr(&mut reader, log);
+
+    let mut block = None;
+    if reader.peek().token_type == TokenType::Semicolon {
+        reader.next();
+    } else {
+        block = parse_statblock(&mut reader, log);
+    }
+
+    outer_reader.set_from_inner(&reader);
+    Some(Box::new(ParsedStatement::FuncDeclaration {
+        modifiers,
+        field_mod,
+        is_destructor,
+        returns_reference,
+        return_type,
+        name: name.unwrap(),
+        param_list: param_list.unwrap(),
+        is_const,
+        func_attr,
+        block,
+    }))
 }
 
 fn parse_virtprop(
     outer_reader: &mut ParseReader,
     log: &mut dyn FnMut(Message, Span),
 ) -> Option<Box<ParsedStatement>> {
     let mut reader = outer_reader.create_inner();
-    let mut field_mod: BitFlags<FieldModifier> = BitFlags::empty();
-    let property_type: Option<Box<ParsedStatement>>;
-    loop {
-        let t = reader.peek();
-        match t.token_type {
-            TokenType::PrivateKeyword => {
-                field_mod |= FieldModifier::Private;
-                reader.next();
-            }
-            TokenType::ProtectedKeyword => {
-                field_mod |= FieldModifier::Protected;
-                reader.next();
-            }
-            _ => {
-                property_type = parse_type(&mut reader, log);
-                property_type.as_ref()?;
-                break;
-            }
+    let mut field_mod: Option<FieldModifier> = None;
+    match reader.peek().token_type {
+        TokenType::PrivateKeyword => {
+            field_mod = Some(FieldModifier::Private);
+            reader.next();
         }
+        TokenType::ProtectedKeyword => {
+            field_mod = Some(FieldModifier::Protected);
+            reader.next();
+        }
+        _ => {}
     }
+    let property_type = parse_type(&mut reader, log);
+    property_type.as_ref()?;
+
     let mut is_handle = false;
     if reader.peek().token_type == TokenType::AtSymbol {
         reader.next();
@@ -678,7 +771,16 @@
             default = parse_expr(reader, log);
             // FIXME: log if default is empty
         }
-        params.push((param_type.unwrap(), type_mod, identifier, default));
+        params.push(ParsedParameter {
+            parameter_type: param_type.unwrap(),
+            type_mod,
+            identifier,
+            default,
+        });
+        if reader.peek().token_type != TokenType::Comma {
+            break;
+        }
+        reader.next();
     }
     reader.consume(TokenType::CloseBracket, log);
     Some(Box::new(ParsedStatement::ParamList { parameters: params }))
@@ -743,6 +845,8 @@ fn parse_var(
     outer_reader: &mut ParseReader,
     log: &mut dyn FnMut(Message, Span),
 ) -> Option<Box<ParsedStatement>> {
+    // var ::= ['private'|'protected'] type identifier [( '=' (initlist | expr)) | arglist] {',' identifier [( '=' (initlist | expr)) | arglist]} ';';
+    // var ::= ['private'|'protected'] type identifier
 
     let mut reader = outer_reader.create_inner();
@@ -793,6 +897,11 @@
     // {',' identifier [( '=' (initlist | expr)) | arglist]} ';';
 
+    if reader.peek().token_type != TokenType::Semicolon {
+        return None;
+    }
+    reader.next();
+
     outer_reader.set_from_inner(&reader);
     Some(Box::new(ParsedStatement::Var {
         modifier: field_mod,
@@ -1087,6 +1196,9 @@
     if let Some(..) = binary_operand {
         let expr_term2 = parse_exprterm(reader, log);
+        if expr_term2.is_none() {
+            unimplemented!()
+        }
         // FIXME: deal with empty expr_term2
         return Some(Box::new(ParsedStatement::BinaryExpr {
             left: expr_term.unwrap(),
@@ -1105,8 +1217,8 @@ fn parse_exprterm(
     let mut reader = outer_reader.create_inner();
     // exprterm ::= ([type '='] initlist) | ({exprpreop} exprvalue {exprpostop});
     let expr_type = parse_type(&mut reader, log);
-    if expr_type.is_some() {
-        outer_reader.consume(TokenType::Equals, log);
+    if expr_type.is_some() && reader.peek().token_type == TokenType::Equals {
+        reader.consume(TokenType::Equals, log);
     }
 
     let mut init_list = None;
@@ -1148,6 +1260,7 @@
         })
     }
 
+    println!("{:?}", real_value);
     outer_reader.set_from_inner(&reader);
     Some(real_value)
 }
@@ -1420,7 +1533,7 @@
     let construct_type = parse_type(&mut reader, log);
     construct_type.as_ref()?;
     let arg_list = parse_arglist(&mut reader, log);
-    // FIXME: deal with None value for arg list
+    arg_list.as_ref()?;
 
     outer_reader.set_from_inner(&reader);
     Some(Box::new(ParsedStatement::ConstructCall {
@@ -1654,6 +1767,9 @@ fn parse_primtype(
         TokenType::Int32Keyword => Some(Box::new(ParsedStatement::DataTypePrimType {
             prim_type: PrimitiveType::Int32,
         })),
+        TokenType::IntKeyword => Some(Box::new(ParsedStatement::DataTypePrimType {
+            prim_type: PrimitiveType::Int32,
+        })),
         TokenType::Int64Keyword => Some(Box::new(ParsedStatement::DataTypePrimType {
             prim_type: PrimitiveType::Int64,
         })),
@@ -1666,6 +1782,9 @@
         TokenType::Uint32Keyword => Some(Box::new(ParsedStatement::DataTypePrimType {
             prim_type: PrimitiveType::UInt32,
         })),
+        TokenType::UintKeyword => Some(Box::new(ParsedStatement::DataTypePrimType {
+            prim_type: PrimitiveType::UInt32,
+        })),
         TokenType::Uint64Keyword => Some(Box::new(ParsedStatement::DataTypePrimType {
             prim_type: PrimitiveType::UInt64,
         })),
diff --git a/src/parsing/parser/parsed_funcattr.rs b/src/parsing/parser/parsed_funcattr.rs
index 70ca619..a01bbe9 100644
--- a/src/parsing/parser/parsed_funcattr.rs
+++ b/src/parsing/parser/parsed_funcattr.rs
@@ -1,8 +1,11 @@
 use enumflags2::bitflags;
+#[cfg(test)]
+use serde_derive::{Deserialize, Serialize};
 
 #[bitflags]
 #[repr(u8)]
 #[derive(Eq, PartialEq, Debug, Copy, Clone)]
+#[cfg_attr(test, derive(Serialize, Deserialize))]
 pub enum FuncAttr {
     Override,
     Final,
diff --git a/src/parsing/parser/parsed_statement.rs b/src/parsing/parser/parsed_statement.rs
index 83e2322..3ea9c29 100644
--- a/src/parsing/parser/parsed_statement.rs
+++ b/src/parsing/parser/parsed_statement.rs
@@ -9,7 +9,20 @@ use crate::parsing::parser::parser_operators::{
 use crate::prim_type::PrimitiveType;
 use enumflags2::BitFlags;
 
+#[cfg(test)]
+use serde_derive::{Deserialize, Serialize};
+
 #[derive(PartialEq, Debug)]
+#[cfg_attr(test, derive(Serialize, Deserialize))]
+pub struct ParsedParameter {
+    pub parameter_type: Box<ParsedStatement>,
+    pub type_mod: Option<Box<ParsedStatement>>,
+    pub identifier: Option<String>,
+    pub default: Option<Box<ParsedStatement>>,
+}
+
+#[derive(PartialEq, Debug)]
+#[cfg_attr(test, derive(Serialize, Deserialize))]
 pub enum ParsedStatement {
     Invalid,
     Script {
@@ -31,7 +44,7 @@ pub enum ParsedStatement {
         generic_types: Option<Vec<String>>,
     },
     VirtProp {
-        field_mod: BitFlags<FieldModifier>,
+        field_mod: Option<FieldModifier>,
         property_type: Box<ParsedStatement>,
         identifier: String,
         is_handle: bool,
@@ -169,12 +182,7 @@
         expression: Option<Box<ParsedStatement>>,
     },
     ParamList {
-        parameters: Vec<(
-            Box<ParsedStatement>,         // type
-            Option<Box<ParsedStatement>>, // typemod
-            Option<String>,               // identifier
-            Option<Box<ParsedStatement>>, // default expression
-        )>,
+        parameters: Vec<ParsedParameter>,
     },
     InterfaceMethod {
         return_type: Box<ParsedStatement>,
@@ -204,4 +212,16 @@
         base_type: Option<Box<ParsedStatement>>,
         values: Vec<(String, Option<Box<ParsedStatement>>)>,
     },
+    FuncDeclaration {
+        modifiers: BitFlags<TypeModifier>,
+        field_mod: Option<FieldModifier>,
+        is_destructor: bool,
+        returns_reference: bool,
+        return_type: Option<Box<ParsedStatement>>,
+        name: String,
+        param_list: Box<ParsedStatement>,
+        is_const: bool,
+        func_attr: BitFlags<FuncAttr>,
+        block: Option<Box<ParsedStatement>>,
+    },
 }
diff --git a/src/parsing/parser/parsed_type_modifier.rs b/src/parsing/parser/parsed_type_modifier.rs
index a210003..0b6f8f0 100644
--- a/src/parsing/parser/parsed_type_modifier.rs
+++ b/src/parsing/parser/parsed_type_modifier.rs
@@ -1,6 +1,9 @@
 use enumflags2::bitflags;
+#[cfg(test)]
+use serde_derive::{Deserialize, Serialize};
 
 #[derive(Eq, PartialEq, Debug)]
+#[cfg_attr(test, derive(Serialize, Deserialize))]
 pub enum ParsedTypeModifier {
     Array,
     Handle,
@@ -10,6 +13,7 @@
 #[bitflags]
 #[repr(u8)]
 #[derive(Eq, PartialEq, Debug, Copy, Clone)]
+#[cfg_attr(test, derive(Serialize, Deserialize))]
 pub enum ReferenceModifier {
     In,
     Out,
diff --git a/src/parsing/parser/parser_operators.rs b/src/parsing/parser/parser_operators.rs
index 156275c..369de41 100644
--- a/src/parsing/parser/parser_operators.rs
+++ b/src/parsing/parser/parser_operators.rs
@@ -1,4 +1,8 @@
+#[cfg(test)]
+use serde_derive::{Deserialize, Serialize};
+
 #[derive(Eq, PartialEq, Debug, Copy, Clone)]
+#[cfg_attr(test, derive(Serialize, Deserialize))]
 pub enum PreOperator {
     Negative,
     Identity,
@@ -10,12 +14,14 @@ pub enum PreOperator {
 }
 
 #[derive(Eq, PartialEq, Debug, Copy, Clone)]
+#[cfg_attr(test, derive(Serialize, Deserialize))]
 pub enum PostOperator {
     Increment,
     Decrement,
 }
 
 #[derive(Eq, PartialEq, Debug, Copy, Clone)]
+#[cfg_attr(test, derive(Serialize, Deserialize))]
 pub enum BinaryOperator {
     // math op
     Addition,
@@ -61,6 +67,7 @@ pub enum BinaryOperator {
 }
 
 #[derive(Eq, PartialEq, Debug, Copy, Clone)]
+#[cfg_attr(test, derive(Serialize, Deserialize))]
 pub enum TernaryOperator {
     Conditional,
 }
diff --git a/src/parsing/parser/parser_tests.rs b/src/parsing/parser/parser_tests.rs
index c73a9fd..6bc6f04 100644
--- a/src/parsing/parser/parser_tests.rs
+++ b/src/parsing/parser/parser_tests.rs
@@ -5,7 +5,6 @@ use crate::parsing::lexer::lex_tokens::{LexToken, TokenType};
 use crate::parsing::parser::parsed_statement::ParsedStatement::DataTypeAuto;
 use crate::parsing::parser::parser_operators::{BinaryOperator, PreOperator};
 use crate::span::Span;
-use enumflags2::BitFlags;
 
 fn create_tokens(types: Vec<TokenType>) -> Vec<LexToken> {
     let mut v = Vec::with_capacity(types.len());
@@ -179,7 +178,7 @@ fn test_interface_with_virtprop() {
         set_statement,
     } = &statements[0]
     {
-        assert_eq!(*field_mod, BitFlags::empty());
+        assert_eq!(*field_mod, None);
         if let ParsedStatement::Type {
             is_const, datatype, ..
         } = property_type.as_ref()
@@ -225,6 +224,7 @@ fn test_assign_to_global_variable() {
             TokenType::Equals,
             TokenType::WhiteSpace,
             TokenType::IntegerLiteral(100),
+            TokenType::Semicolon,
             TokenType::EndOfFile,
         ]),
         &mut |_message, _span| {
@@ -276,6 +276,7 @@ fn test_assign_negative_to_global_variable() {
             TokenType::WhiteSpace,
             TokenType::Minus,
             TokenType::IntegerLiteral(100),
+            TokenType::Semicolon,
             TokenType::EndOfFile,
         ]),
         &mut |_message, _span| {
@@ -332,6 +333,7 @@ fn test_assign_addition_to_global_variable() {
             TokenType::Plus,
             TokenType::Minus,
             TokenType::IntegerLiteral(20),
+            TokenType::Semicolon,
             TokenType::EndOfFile,
         ]),
         &mut |_message, _span| {
diff --git a/src/prim_type.rs b/src/prim_type.rs
index b4d27eb..2b29c2f 100644
--- a/src/prim_type.rs
+++ b/src/prim_type.rs
@@ -1,6 +1,9 @@
 use crate::defines::PointerSize;
+#[cfg(test)]
+use serde_derive::{Deserialize, Serialize};
 
 #[derive(Eq, PartialEq, Debug)]
+#[cfg_attr(test, derive(Serialize, Deserialize))]
 pub enum PrimitiveType {
     Void,
     Int8,
diff --git a/src/span.rs b/src/span.rs
index 2adedd7..994f36d 100644
--- a/src/span.rs
+++ b/src/span.rs
@@ -1,4 +1,8 @@
-#[derive(Copy, Clone, Debug)]
+#[cfg(test)]
+use serde_derive::{Deserialize, Serialize};
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+#[cfg_attr(test, derive(Serialize, Deserialize))]
 pub struct Span {
     pub start: usize,
     pub end: usize,
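
Editor's note: the sketch below is an illustration only, not part of this commit. It shows how the lex -> parse pipeline that the generated integration tests drive could be called with an error callback that collects diagnostics instead of panicking, assuming only the crate-internal APIs visible in this patch (lex, parse, Message::stringify, Span); the function name parse_collecting_errors is hypothetical.

    use crate::logger::messages::Message;
    use crate::parsing::lexer::lex;
    use crate::parsing::parser::parse;
    use crate::parsing::parser::parsed_statement::ParsedStatement;
    use crate::span::Span;

    // Same pipeline as the generated tests, but diagnostics are pushed
    // into a Vec instead of aborting via panic_on_error.
    fn parse_collecting_errors(script: &str) -> (Box<ParsedStatement>, Vec<String>) {
        let mut errors: Vec<String> = Vec::new();
        let mut on_error = |msg: Message, _span: Span| errors.push(msg.stringify());
        let tokens = lex(script, &mut on_error);
        let tree = parse(tokens, &mut on_error);
        (tree, errors)
    }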