-rw-r--r-- | Makefile        |  4 |
-rw-r--r-- | cpp.cpp         | 47 |
-rwxr-xr-x | cppbnf.cpp      | 62 |
-rw-r--r-- | cppbnf.h        | 10 |
-rw-r--r-- | test-cpp.cpp    | 61 |
-rw-r--r-- | test-cppbnf.cpp | 45 |
-rw-r--r-- | test-lexer.cpp  |  2 |
-rw-r--r-- | test-minicc.cpp |  8 |
8 files changed, 160 insertions, 79 deletions
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -45,12 +45,14 @@ endif
 
 SRC=\
 	bnf.cpp \
 	cpp.cpp \
+	test-cpp.cpp \
+	cppbnf.cpp \
+	test-cppbnf.cpp \
 	grammer.cpp \
 	lexer.cpp \
 	test-lexer.cpp \
 	minicc.cpp \
 	test-minicc.cpp \
-	cppbnf.cpp \
 	googletest/src/gtest-all.cpp \
 	googlemock/src/gmock-all.cpp
diff --git a/cpp.cpp b/cpp.cpp
--- a/cpp.cpp
+++ b/cpp.cpp
@@ -33,7 +33,7 @@ void CPP::backslash_escape()
 
 // Phase 3: Parse preprocessing tokens
 std::vector<Token> CPP::preprocessing_tokenize(const std::string& s)
 {
-	auto bnf{SubBNF(GetCppBNFLex(), "preprocessing-token")};
+	auto bnf{SubBNF(CPPBNF::GetCppBNFLex(), "preprocessing-token")};
 
 	Lex::Lexer lexer(bnf, "preprocessing-token");
@@ -137,9 +137,13 @@ std::vector<Token> CPP::tokens_from_pptokens(std::vector<Token> pp_tokens)
 }
 
 // Phase 7.b: Grammar Analysis
-std::pair<index_t, std::vector<Gram::TreeNode>> analysis(std::vector<Token>)
+std::pair<index_t, std::vector<Gram::TreeNode>> CPP::analysis(std::vector<Token> tokens)
 {
-	return {0 , {}};
+	auto bnf = SubBNF(CPPBNF::GetCppBNFGram(), "translation-unit");
+
+	Gram::Compiler compiler(bnf, "translation-unit");
+
+	return compiler.compile(tokens);
 }
 
 // Phase 7.c: Translate
@@ -178,40 +182,3 @@ void CPP::translate(const std::string& code)
 #endif
 }
 
-class CppTest: public ::testing::Test
-{
-protected:
-	CppTest() {
-		//debug = true;
-	}
-	~CppTest() {
-	}
-};
-
-#if 1
-TEST_F(CppTest, preprocessing_tokenize) {
-	CPP cpp;
-	auto pp_tokens = cpp.preprocessing_tokenize("int main() { return 1; }");
-
-	ASSERT_EQ(pp_tokens.size(), 9);
-
-	auto tokens = cpp.tokens_from_pptokens(pp_tokens);
-
-	ASSERT_EQ(tokens.size(), 9);
-}
-#endif
-
-#if 0
-TEST_F(CppTest, preprocessing_tokenize2) {
-	CPP cpp;
-	auto ppTree = cpp.preprocessing_tokenize("in ma");
-
-	cpp.tokens_from_pptokens(ppTree);
-}
-#endif
-
-#if 0
-TEST(Cpp, translate) {
-	CPP::translate();
-}
-#endif
diff --git a/cppbnf.cpp b/cppbnf.cpp
--- a/cppbnf.cpp
+++ b/cppbnf.cpp
@@ -90,29 +90,6 @@ namespace {
 		return true;
 	}
 
-	// returns 1 if exactly one start symbol and
-	// all nodes size > 1, except terminal symbols
-	bool valid(const BNF& bnf) {
-		return numberOfStartSymbols(bnf) == 1 && symbolsValid(bnf);
-	}
-
-	bool validLex(const BNF& bnf) {
-		// all terminal symbols exactly one character
-
-		for (const auto& [symbol, lists] : bnf) {
-			for (const auto& list : lists) {
-				for (const auto& i : list) {
-					if (i.size() != 1 && isTerminal(bnf, i)) {
-						std::cerr << "Warning: Terminal symbol in " << symbol << " is too long: "s << i << std::endl;
-						return false;
-					}
-				}
-			}
-		}
-
-		return true;
-	}
-
 	const std::string optionalMarker{"OPTIONAL:"};
 
 	std::string optional(const std::string& s)
@@ -206,6 +183,32 @@
 
 } // namespace
 
+namespace CPPBNF {
+
+// returns 1 if exactly one start symbol and
+// all nodes size > 1, except terminal symbols
+bool valid(const BNF& bnf) {
+	return numberOfStartSymbols(bnf) == 1 && symbolsValid(bnf);
+}
+
+bool validLex(const BNF& bnf) {
+	// all terminal symbols exactly one character
+
+	for (const auto& [symbol, lists] : bnf) {
+		for (const auto& list : lists) {
+			for (const auto& i : list) {
+				if (i.size() != 1 && isTerminal(bnf, i)) {
+					std::cerr << "Warning: Terminal symbol in " << symbol << " is too long: "s << i << std::endl;
+					return false;
+				}
+			}
+		}
+	}
+
+	return true;
+}
+
+
 BNF GetCppBNFLex()
 {
 	BNF bnf{
@@ -1919,15 +1922,4 @@ BNF GetCppBNFGram()
 	return normalizeBNF(bnf);
 }
 
-TEST(CppBnf, LexicalBnf) {
-	auto bnf = SubBNF(GetCppBNFLex(), "preprocessing-token");
-
-	EXPECT_TRUE(valid(bnf));
-	EXPECT_TRUE(validLex(bnf));
-}
-
-TEST(CppBnf, GrammarBnf) {
-	auto bnf = SubBNF(GetCppBNFGram(), "translation-unit");
-
-	EXPECT_TRUE(valid(bnf));
-}
+} // namespace CPPBNF
diff --git a/cppbnf.h b/cppbnf.h
--- a/cppbnf.h
+++ b/cppbnf.h
@@ -2,5 +2,11 @@
 
 #include "bnf.h"
 
-BNF GetCppBNFLex();
-BNF GetCppBNFGram();
+namespace CPPBNF {
+
+	BNF GetCppBNFLex();
+	BNF GetCppBNFGram();
+	bool valid(const BNF& bnf);
+	bool validLex(const BNF& bnf);
+
+}
diff --git a/test-cpp.cpp b/test-cpp.cpp
new file mode 100644
index 0000000..d08f9b0
--- /dev/null
+++ b/test-cpp.cpp
@@ -0,0 +1,61 @@
+#include "bnf.h"
+#include "cpp.h"
+#include "cppbnf.h"
+#include "lexer.h"
+#include "grammer.h"
+#include "minicc.h"
+#include "debug.h"
+
+#include <boost/algorithm/string.hpp>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+#include <algorithm>
+#include <cctype>
+#include <deque>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+class CppTest: public ::testing::Test
+{
+protected:
+	CppTest() {
+		//debug = true;
+	}
+	~CppTest() {
+	}
+};
+
+#if 1
+TEST_F(CppTest, preprocessing_tokenize) {
+	CPP cpp;
+	auto pp_tokens = cpp.preprocessing_tokenize("int main() { return 1; }");
+
+	ASSERT_EQ(pp_tokens.size(), 9);
+
+	auto tokens = cpp.tokens_from_pptokens(pp_tokens);
+
+	ASSERT_EQ(tokens.size(), 9);
+
+	//auto result = cpp.analysis(tokens);
+}
+#endif
+
+#if 0
+TEST_F(CppTest, preprocessing_tokenize2) {
+	CPP cpp;
+	auto ppTree = cpp.preprocessing_tokenize("in ma");
+
+	cpp.tokens_from_pptokens(ppTree);
+}
+#endif
+
+#if 0
+TEST(Cpp, translate) {
+	CPP::translate();
+}
+#endif
diff --git a/test-cppbnf.cpp b/test-cppbnf.cpp
new file mode 100644
index 0000000..e365574
--- /dev/null
+++ b/test-cppbnf.cpp
@@ -0,0 +1,45 @@
+#include "bnf.h"
+#include "cpp.h"
+#include "cppbnf.h"
+#include "lexer.h"
+#include "grammer.h"
+#include "minicc.h"
+#include "debug.h"
+
+#include <boost/algorithm/string.hpp>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+#include <algorithm>
+#include <cctype>
+#include <deque>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+class CppBnfTest: public ::testing::Test
+{
+protected:
+	CppBnfTest() {
+		//debug = true;
+	}
+	~CppBnfTest() {
+	}
+};
+
+TEST_F(CppBnfTest, LexicalBnf) {
+	auto bnf = SubBNF(CPPBNF::GetCppBNFLex(), "preprocessing-token");
+
+	EXPECT_TRUE(CPPBNF::valid(bnf));
+	EXPECT_TRUE(CPPBNF::validLex(bnf));
+}
+
+TEST_F(CppBnfTest, GrammarBnf) {
+	auto bnf = SubBNF(CPPBNF::GetCppBNFGram(), "translation-unit");
+
+	EXPECT_TRUE(CPPBNF::valid(bnf));
+}
+
diff --git a/test-lexer.cpp b/test-lexer.cpp
index 9f1cb77..23983f1 100644
--- a/test-lexer.cpp
+++ b/test-lexer.cpp
@@ -29,7 +29,7 @@ protected:
 };
 
 TEST_F(LexerTest, Lex) {
-	auto bnf{SubBNF(GetCppBNFLex(), "preprocessing-token")};
+	auto bnf{SubBNF(CPPBNF::GetCppBNFLex(), "preprocessing-token")};
 
 	Lex::Lexer lexer(bnf, "preprocessing-token");
 
diff --git a/test-minicc.cpp b/test-minicc.cpp
new file mode 100644
index 0000000..baf8b3f
--- /dev/null
+++ b/test-minicc.cpp
@@ -0,0 +1,8 @@
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+int main(int argc, char* argv[]) {
+	::testing::InitGoogleMock(&argc, argv);
+	return RUN_ALL_TESTS();
+}
+
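
For orientation, the pieces this commit moves around still compose into one pipeline, which the new tests exercise step by step: lex the source into preprocessing tokens, convert those to tokens, then run grammar analysis against the translation-unit BNF. Below is a minimal sketch of that flow, assuming only the CPP and CPPBNF interfaces shown in the hunks above; the driver itself is hypothetical and not part of this commit.

	// Hypothetical driver (not in this commit): chains the phases that
	// cpp.cpp implements and test-cpp.cpp exercises.
	#include "cpp.h"
	#include "grammer.h"
	#include "minicc.h"

	#include <iostream>
	#include <string>
	#include <vector>

	int main() {
		CPP cpp;

		// Phase 3: source text -> preprocessing tokens (preprocessing-token BNF)
		auto pp_tokens = cpp.preprocessing_tokenize("int main() { return 1; }");

		// Phase 7.a: preprocessing tokens -> tokens
		auto tokens = cpp.tokens_from_pptokens(pp_tokens);

		// Phase 7.b: grammar analysis against the translation-unit BNF;
		// per the new signature, returns the root index and the tree nodes
		auto [root, nodes] = cpp.analysis(tokens);

		std::cout << "parsed " << tokens.size() << " tokens into "
		          << nodes.size() << " tree nodes\n";
		return 0;
	}

The same chain appears in test-cpp.cpp, where the analysis call is still commented out; the sketch only shows where it is intended to slot in once the grammar compiler is ready.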