author     Jeff Davidson <jpd@google.com>  2015-01-20 10:18:05 -0800
committer  Jeff Davidson <jpd@google.com>  2015-01-20 10:18:05 -0800
commit     0ddac1f3791efefb2cffdb425f0c600feb7a47e6 (patch)
tree       efeab8fb69198186f1dabfe43d341c7b70c9c5f1 /gtest/test
parent     77a6b2f4cdd580d57630f079db1d908d7fd90a54 (diff)
Update protobuf's gtest to expected version.

Generated by running: rm -rf gtest ./autogen.sh ./configure

Change-Id: I5d9c0bea09dd9d3e4d7d4442dd8222011f5c522a
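Read as a sequence of shell commands run from the repository root (an assumption; the line breaks of the original commit message were lost when the page was flattened), the regeneration steps would be:

    rm -rf gtest    # drop the previously bundled googletest sources
    ./autogen.sh    # regenerate the autotools scripts; presumably re-downloads and unpacks the pinned gtest version
    ./configure     # re-run configure against the refreshed tree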
Diffstat (limited to 'gtest/test')
-rw-r--r--  gtest/test/gtest-death-test_test.cc              82
-rw-r--r--  gtest/test/gtest-filepath_test.cc               106
-rw-r--r--  gtest/test/gtest-listener_test.cc                63
-rw-r--r--  gtest/test/gtest-message_test.cc                 24
-rw-r--r--  gtest/test/gtest-options_test.cc                139
-rw-r--r--  gtest/test/gtest-param-test_test.cc             137
-rw-r--r--  gtest/test/gtest-port_test.cc                   341
-rw-r--r--  gtest/test/gtest-test-part_test.cc               49
-rw-r--r--  gtest/test/gtest-tuple_test.cc                   42
-rw-r--r--  gtest/test/gtest-typed-test_test.cc              12
-rw-r--r--  gtest/test/gtest_all_test.cc                      1
-rwxr-xr-x  gtest/test/gtest_break_on_failure_unittest.py    11
-rw-r--r--  gtest/test/gtest_break_on_failure_unittest_.cc   20
-rw-r--r--  gtest/test/gtest_color_test_.cc                  13
-rwxr-xr-x  gtest/test/gtest_env_var_test.py                 10
-rwxr-xr-x  gtest/test/gtest_filter_unittest.py             189
-rwxr-xr-x  gtest/test/gtest_help_test.py                    55
-rw-r--r--  gtest/test/gtest_help_test_.cc                    4
-rw-r--r--  gtest/test/gtest_nc.cc                          234
-rwxr-xr-x  gtest/test/gtest_nc_test.py                     106
-rwxr-xr-x  gtest/test/gtest_output_test.py                  86
-rw-r--r--  gtest/test/gtest_output_test_.cc                162
-rw-r--r--  gtest/test/gtest_output_test_golden_lin.txt      72
-rwxr-xr-x  gtest/test/gtest_shuffle_test.py                 14
-rw-r--r--  gtest/test/gtest_stress_test.cc                 150
-rwxr-xr-x  gtest/test/gtest_test_utils.py                   65
-rw-r--r--  gtest/test/gtest_unittest.cc                   1057
-rw-r--r--  gtest/test/production.h                           2
-rwxr-xr-x  gtest/test/run_tests_util.py                    466
-rwxr-xr-x  gtest/test/run_tests_util_test.py               676
30 files changed, 2998 insertions, 1390 deletions
diff --git a/gtest/test/gtest-death-test_test.cc b/gtest/test/gtest-death-test_test.cc
index 288c70a..ed5b53b 100644
--- a/gtest/test/gtest-death-test_test.cc
+++ b/gtest/test/gtest-death-test_test.cc
@@ -103,6 +103,21 @@ class ReplaceDeathTestFactory {
} // namespace internal
} // namespace testing
+void DieInside(const char* function) {
+ fprintf(stderr, "death inside %s().", function);
+ fflush(stderr);
+ // We call _exit() instead of exit(), as the former is a direct
+ // system call and thus safer in the presence of threads. exit()
+ // will invoke user-defined exit-hooks, which may do dangerous
+ // things that conflict with death tests.
+ //
+ // Some compilers can recognize that _exit() never returns and issue the
+ // 'unreachable code' warning for code following this function, unless
+ // fooled by a fake condition.
+ if (AlwaysTrue())
+ _exit(1);
+}
+
// Tests that death tests work.
class TestForDeathTest : public testing::Test {
@@ -114,23 +129,12 @@ class TestForDeathTest : public testing::Test {
}
// A static member function that's expected to die.
- static void StaticMemberFunction() {
- fprintf(stderr, "%s", "death inside StaticMemberFunction().");
- fflush(stderr);
- // We call _exit() instead of exit(), as the former is a direct
- // system call and thus safer in the presence of threads. exit()
- // will invoke user-defined exit-hooks, which may do dangerous
- // things that conflict with death tests.
- _exit(1);
- }
+ static void StaticMemberFunction() { DieInside("StaticMemberFunction"); }
// A method of the test fixture that may die.
void MemberFunction() {
- if (should_die_) {
- fprintf(stderr, "%s", "death inside MemberFunction().");
- fflush(stderr);
- _exit(1);
- }
+ if (should_die_)
+ DieInside("MemberFunction");
}
// True iff MemberFunction() should die.
@@ -145,9 +149,8 @@ class MayDie {
// A member function that may die.
void MemberFunction() const {
- if (should_die_) {
- GTEST_LOG_(FATAL) << "death inside MayDie::MemberFunction().";
- }
+ if (should_die_)
+ DieInside("MayDie::MemberFunction");
}
private:
@@ -156,27 +159,24 @@ class MayDie {
};
// A global function that's expected to die.
-void GlobalFunction() {
- GTEST_LOG_(FATAL) << "death inside GlobalFunction().";
-}
+void GlobalFunction() { DieInside("GlobalFunction"); }
// A non-void function that's expected to die.
int NonVoidFunction() {
- GTEST_LOG_(FATAL) << "death inside NonVoidFunction().";
+ DieInside("NonVoidFunction");
return 1;
}
// A unary function that may die.
void DieIf(bool should_die) {
- if (should_die) {
- GTEST_LOG_(FATAL) << "death inside DieIf().";
- }
+ if (should_die)
+ DieInside("DieIf");
}
// A binary function that may die.
bool DieIfLessThan(int x, int y) {
if (x < y) {
- GTEST_LOG_(FATAL) << "death inside DieIfLessThan().";
+ DieInside("DieIfLessThan");
}
return true;
}
@@ -191,7 +191,7 @@ void DeathTestSubroutine() {
int DieInDebugElse12(int* sideeffect) {
if (sideeffect) *sideeffect = 12;
#ifndef NDEBUG
- GTEST_LOG_(FATAL) << "debug death inside DieInDebugElse12()";
+ DieInside("DieInDebugElse12");
#endif // NDEBUG
return 12;
}
@@ -410,7 +410,7 @@ void SetPthreadFlag() {
} // namespace
-#if GTEST_HAS_CLONE
+#if GTEST_HAS_CLONE && GTEST_HAS_PTHREAD
TEST_F(TestForDeathTest, DoesNotExecuteAtforkHooks) {
if (!testing::GTEST_FLAG(death_test_use_fork)) {
@@ -422,7 +422,7 @@ TEST_F(TestForDeathTest, DoesNotExecuteAtforkHooks) {
}
}
-#endif // GTEST_HAS_CLONE
+#endif // GTEST_HAS_CLONE && GTEST_HAS_PTHREAD
// Tests that a method of another class can be used in a death test.
TEST_F(TestForDeathTest, MethodOfAnotherClass) {
@@ -449,10 +449,8 @@ TEST_F(TestForDeathTest, AcceptsAnythingConvertibleToRE) {
EXPECT_DEATH(GlobalFunction(), regex_str);
#endif // GTEST_HAS_GLOBAL_STRING
-#if GTEST_HAS_STD_STRING
const ::std::string regex_std_str(regex_c_str);
EXPECT_DEATH(GlobalFunction(), regex_std_str);
-#endif // GTEST_HAS_STD_STRING
}
// Tests that a non-void function can be used in a death test.
@@ -659,24 +657,6 @@ static void TestExitMacros() {
EXPECT_EXIT(_exit(1), testing::ExitedWithCode(1), "");
ASSERT_EXIT(_exit(42), testing::ExitedWithCode(42), "");
-#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MINGW
- // MinGW (as of MinGW 5.1.6 and MSYS 1.0.11) does not tag crashed
- // processes with non-zero exit code and does not honor calls to
- // SetErrorMode(SEM_NOGPFAULTERRORBOX) that are supposed to suppress
- // error pop-ups.
- EXPECT_EXIT({
- testing::GTEST_FLAG(catch_exceptions) = false;
- *static_cast<int*>(NULL) = 1;
- }, testing::ExitedWithCode(0xC0000005), "") << "foo";
-
- EXPECT_NONFATAL_FAILURE({ // NOLINT
- EXPECT_EXIT({
- testing::GTEST_FLAG(catch_exceptions) = false;
- *static_cast<int*>(NULL) = 1;
- }, testing::ExitedWithCode(0), "") << "This failure is expected.";
- }, "This failure is expected.");
-#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MINGW
-
#if GTEST_OS_WINDOWS
// Of all signals effects on the process exit code, only those of SIGABRT
// are documented on Windows.
@@ -1131,8 +1111,10 @@ TEST(EnvironmentTest, HandleFitsIntoSizeT) {
// Tests that EXPECT_DEATH_IF_SUPPORTED/ASSERT_DEATH_IF_SUPPORTED trigger
// failures when death tests are available on the system.
TEST(ConditionalDeathMacrosDeathTest, ExpectsDeathWhenDeathTestsAvailable) {
- EXPECT_DEATH_IF_SUPPORTED(GTEST_CHECK_(false) << "failure", "false.*failure");
- ASSERT_DEATH_IF_SUPPORTED(GTEST_CHECK_(false) << "failure", "false.*failure");
+ EXPECT_DEATH_IF_SUPPORTED(DieInside("CondDeathTestExpectMacro"),
+ "death inside CondDeathTestExpectMacro");
+ ASSERT_DEATH_IF_SUPPORTED(DieInside("CondDeathTestAssertMacro"),
+ "death inside CondDeathTestAssertMacro");
// Empty statement will not crash, which must trigger a failure.
EXPECT_NONFATAL_FAILURE(EXPECT_DEATH_IF_SUPPORTED(;, ""), "");
diff --git a/gtest/test/gtest-filepath_test.cc b/gtest/test/gtest-filepath_test.cc
index 5bc4daf..6250282 100644
--- a/gtest/test/gtest-filepath_test.cc
+++ b/gtest/test/gtest-filepath_test.cc
@@ -151,6 +151,36 @@ TEST(RemoveDirectoryNameTest, ShouldAlsoGiveFileName) {
.RemoveDirectoryName().c_str());
}
+#if GTEST_HAS_ALT_PATH_SEP_
+
+// Tests that RemoveDirectoryName() works with the alternate separator
+// on Windows.
+
+// RemoveDirectoryName("/afile") -> "afile"
+TEST(RemoveDirectoryNameTest, RootFileShouldGiveFileNameForAlternateSeparator) {
+ EXPECT_STREQ("afile",
+ FilePath("/afile").RemoveDirectoryName().c_str());
+}
+
+// RemoveDirectoryName("adir/") -> ""
+TEST(RemoveDirectoryNameTest, WhereThereIsNoFileNameForAlternateSeparator) {
+ EXPECT_STREQ("",
+ FilePath("adir/").RemoveDirectoryName().c_str());
+}
+
+// RemoveDirectoryName("adir/afile") -> "afile"
+TEST(RemoveDirectoryNameTest, ShouldGiveFileNameForAlternateSeparator) {
+ EXPECT_STREQ("afile",
+ FilePath("adir/afile").RemoveDirectoryName().c_str());
+}
+
+// RemoveDirectoryName("adir/subdir/afile") -> "afile"
+TEST(RemoveDirectoryNameTest, ShouldAlsoGiveFileNameForAlternateSeparator) {
+ EXPECT_STREQ("afile",
+ FilePath("adir/subdir/afile").RemoveDirectoryName().c_str());
+}
+
+#endif
// RemoveFileName "" -> "./"
TEST(RemoveFileNameTest, EmptyName) {
@@ -190,6 +220,36 @@ TEST(RemoveFileNameTest, GivesRootDir) {
FilePath(GTEST_PATH_SEP_ "afile").RemoveFileName().c_str());
}
+#if GTEST_HAS_ALT_PATH_SEP_
+
+// Tests that RemoveFileName() works with the alternate separator on
+// Windows.
+
+// RemoveFileName("adir/") -> "adir/"
+TEST(RemoveFileNameTest, ButNoFileForAlternateSeparator) {
+ EXPECT_STREQ("adir" GTEST_PATH_SEP_,
+ FilePath("adir/").RemoveFileName().c_str());
+}
+
+// RemoveFileName("adir/afile") -> "adir/"
+TEST(RemoveFileNameTest, GivesDirNameForAlternateSeparator) {
+ EXPECT_STREQ("adir" GTEST_PATH_SEP_,
+ FilePath("adir/afile").RemoveFileName().c_str());
+}
+
+// RemoveFileName("adir/subdir/afile") -> "adir/subdir/"
+TEST(RemoveFileNameTest, GivesDirAndSubDirNameForAlternateSeparator) {
+ EXPECT_STREQ("adir" GTEST_PATH_SEP_ "subdir" GTEST_PATH_SEP_,
+ FilePath("adir/subdir/afile").RemoveFileName().c_str());
+}
+
+// RemoveFileName("/afile") -> "\"
+TEST(RemoveFileNameTest, GivesRootDirForAlternateSeparator) {
+ EXPECT_STREQ(GTEST_PATH_SEP_,
+ FilePath("/afile").RemoveFileName().c_str());
+}
+
+#endif
TEST(MakeFileNameTest, GenerateWhenNumberIsZero) {
FilePath actual = FilePath::MakeFileName(FilePath("foo"), FilePath("bar"),
@@ -295,6 +355,10 @@ TEST(RemoveTrailingPathSeparatorTest, ShouldRemoveTrailingSeparator) {
EXPECT_STREQ(
"foo",
FilePath("foo" GTEST_PATH_SEP_).RemoveTrailingPathSeparator().c_str());
+#if GTEST_HAS_ALT_PATH_SEP_
+ EXPECT_STREQ("foo",
+ FilePath("foo/").RemoveTrailingPathSeparator().c_str());
+#endif
}
// RemoveTrailingPathSeparator "foo/bar/" -> "foo/bar/"
@@ -397,6 +461,22 @@ TEST(NormalizeTest, MultipleConsecutiveSepaparatorsAtStringEnd) {
FilePath("foo" GTEST_PATH_SEP_ GTEST_PATH_SEP_ GTEST_PATH_SEP_).c_str());
}
+#if GTEST_HAS_ALT_PATH_SEP_
+
+// Tests that separators at the end of the string are normalized
+// regardless of their combination (e.g. "foo\" =="foo/\" ==
+// "foo\\/").
+TEST(NormalizeTest, MixAlternateSeparatorAtStringEnd) {
+ EXPECT_STREQ("foo" GTEST_PATH_SEP_,
+ FilePath("foo/").c_str());
+ EXPECT_STREQ("foo" GTEST_PATH_SEP_,
+ FilePath("foo" GTEST_PATH_SEP_ "/").c_str());
+ EXPECT_STREQ("foo" GTEST_PATH_SEP_,
+ FilePath("foo//" GTEST_PATH_SEP_).c_str());
+}
+
+#endif
+
TEST(AssignmentOperatorTest, DefaultAssignedToNonDefault) {
FilePath default_path;
FilePath non_default_path("path");
@@ -566,6 +646,9 @@ TEST(FilePathTest, RemoveExtensionWhenThereIsNoExtension) {
TEST(FilePathTest, IsDirectory) {
EXPECT_FALSE(FilePath("cola").IsDirectory());
EXPECT_TRUE(FilePath("koala" GTEST_PATH_SEP_).IsDirectory());
+#if GTEST_HAS_ALT_PATH_SEP_
+ EXPECT_TRUE(FilePath("koala/").IsDirectory());
+#endif
}
TEST(FilePathTest, IsAbsolutePath) {
@@ -575,14 +658,33 @@ TEST(FilePathTest, IsAbsolutePath) {
EXPECT_TRUE(FilePath("c:\\" GTEST_PATH_SEP_ "is_not"
GTEST_PATH_SEP_ "relative").IsAbsolutePath());
EXPECT_FALSE(FilePath("c:foo" GTEST_PATH_SEP_ "bar").IsAbsolutePath());
+ EXPECT_TRUE(FilePath("c:/" GTEST_PATH_SEP_ "is_not"
+ GTEST_PATH_SEP_ "relative").IsAbsolutePath());
#else
EXPECT_TRUE(FilePath(GTEST_PATH_SEP_ "is_not" GTEST_PATH_SEP_ "relative")
.IsAbsolutePath());
#endif // GTEST_OS_WINDOWS
}
+TEST(FilePathTest, IsRootDirectory) {
+#if GTEST_OS_WINDOWS
+ EXPECT_TRUE(FilePath("a:\\").IsRootDirectory());
+ EXPECT_TRUE(FilePath("Z:/").IsRootDirectory());
+ EXPECT_TRUE(FilePath("e://").IsRootDirectory());
+ EXPECT_FALSE(FilePath("").IsRootDirectory());
+ EXPECT_FALSE(FilePath("b:").IsRootDirectory());
+ EXPECT_FALSE(FilePath("b:a").IsRootDirectory());
+ EXPECT_FALSE(FilePath("8:/").IsRootDirectory());
+ EXPECT_FALSE(FilePath("c|/").IsRootDirectory());
+#else
+ EXPECT_TRUE(FilePath("/").IsRootDirectory());
+ EXPECT_TRUE(FilePath("//").IsRootDirectory());
+ EXPECT_FALSE(FilePath("").IsRootDirectory());
+ EXPECT_FALSE(FilePath("\\").IsRootDirectory());
+ EXPECT_FALSE(FilePath("/x").IsRootDirectory());
+#endif
+}
+
} // namespace
} // namespace internal
} // namespace testing
-
-#undef GTEST_PATH_SEP_
diff --git a/gtest/test/gtest-listener_test.cc b/gtest/test/gtest-listener_test.cc
index f12f518..c9be39a 100644
--- a/gtest/test/gtest-listener_test.cc
+++ b/gtest/test/gtest-listener_test.cc
@@ -34,15 +34,7 @@
// right times.
#include <gtest/gtest.h>
-
-// Indicates that this translation unit is part of Google Test's
-// implementation. It must come before gtest-internal-inl.h is
-// included, or there will be a compiler error. This trick is to
-// prevent a user from accidentally including gtest-internal-inl.h in
-// his code.
-#define GTEST_IMPLEMENTATION_ 1
-#include "src/gtest-internal-inl.h" // For Vector.
-#undef GTEST_IMPLEMENTATION_
+#include <vector>
using ::testing::AddGlobalTestEnvironment;
using ::testing::Environment;
@@ -54,10 +46,9 @@ using ::testing::TestInfo;
using ::testing::TestPartResult;
using ::testing::UnitTest;
using ::testing::internal::String;
-using ::testing::internal::Vector;
// Used by tests to register their events.
-Vector<String>* g_events = NULL;
+std::vector<String>* g_events = NULL;
namespace testing {
namespace internal {
@@ -68,7 +59,7 @@ class EventRecordingListener : public TestEventListener {
protected:
virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {
- g_events->PushBack(GetFullMethodName("OnTestProgramStart"));
+ g_events->push_back(GetFullMethodName("OnTestProgramStart"));
}
virtual void OnTestIterationStart(const UnitTest& /*unit_test*/,
@@ -76,43 +67,43 @@ class EventRecordingListener : public TestEventListener {
Message message;
message << GetFullMethodName("OnTestIterationStart")
<< "(" << iteration << ")";
- g_events->PushBack(message.GetString());
+ g_events->push_back(message.GetString());
}
virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) {
- g_events->PushBack(GetFullMethodName("OnEnvironmentsSetUpStart"));
+ g_events->push_back(GetFullMethodName("OnEnvironmentsSetUpStart"));
}
virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {
- g_events->PushBack(GetFullMethodName("OnEnvironmentsSetUpEnd"));
+ g_events->push_back(GetFullMethodName("OnEnvironmentsSetUpEnd"));
}
virtual void OnTestCaseStart(const TestCase& /*test_case*/) {
- g_events->PushBack(GetFullMethodName("OnTestCaseStart"));
+ g_events->push_back(GetFullMethodName("OnTestCaseStart"));
}
virtual void OnTestStart(const TestInfo& /*test_info*/) {
- g_events->PushBack(GetFullMethodName("OnTestStart"));
+ g_events->push_back(GetFullMethodName("OnTestStart"));
}
virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) {
- g_events->PushBack(GetFullMethodName("OnTestPartResult"));
+ g_events->push_back(GetFullMethodName("OnTestPartResult"));
}
virtual void OnTestEnd(const TestInfo& /*test_info*/) {
- g_events->PushBack(GetFullMethodName("OnTestEnd"));
+ g_events->push_back(GetFullMethodName("OnTestEnd"));
}
virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {
- g_events->PushBack(GetFullMethodName("OnTestCaseEnd"));
+ g_events->push_back(GetFullMethodName("OnTestCaseEnd"));
}
virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) {
- g_events->PushBack(GetFullMethodName("OnEnvironmentsTearDownStart"));
+ g_events->push_back(GetFullMethodName("OnEnvironmentsTearDownStart"));
}
virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {
- g_events->PushBack(GetFullMethodName("OnEnvironmentsTearDownEnd"));
+ g_events->push_back(GetFullMethodName("OnEnvironmentsTearDownEnd"));
}
virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/,
@@ -120,11 +111,11 @@ class EventRecordingListener : public TestEventListener {
Message message;
message << GetFullMethodName("OnTestIterationEnd")
<< "(" << iteration << ")";
- g_events->PushBack(message.GetString());
+ g_events->push_back(message.GetString());
}
virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {
- g_events->PushBack(GetFullMethodName("OnTestProgramEnd"));
+ g_events->push_back(GetFullMethodName("OnTestProgramEnd"));
}
private:
@@ -140,42 +131,42 @@ class EventRecordingListener : public TestEventListener {
class EnvironmentInvocationCatcher : public Environment {
protected:
virtual void SetUp() {
- g_events->PushBack(String("Environment::SetUp"));
+ g_events->push_back(String("Environment::SetUp"));
}
virtual void TearDown() {
- g_events->PushBack(String("Environment::TearDown"));
+ g_events->push_back(String("Environment::TearDown"));
}
};
class ListenerTest : public Test {
protected:
static void SetUpTestCase() {
- g_events->PushBack(String("ListenerTest::SetUpTestCase"));
+ g_events->push_back(String("ListenerTest::SetUpTestCase"));
}
static void TearDownTestCase() {
- g_events->PushBack(String("ListenerTest::TearDownTestCase"));
+ g_events->push_back(String("ListenerTest::TearDownTestCase"));
}
virtual void SetUp() {
- g_events->PushBack(String("ListenerTest::SetUp"));
+ g_events->push_back(String("ListenerTest::SetUp"));
}
virtual void TearDown() {
- g_events->PushBack(String("ListenerTest::TearDown"));
+ g_events->push_back(String("ListenerTest::TearDown"));
}
};
TEST_F(ListenerTest, DoesFoo) {
// Test execution order within a test case is not guaranteed so we are not
// recording the test name.
- g_events->PushBack(String("ListenerTest::* Test Body"));
+ g_events->push_back(String("ListenerTest::* Test Body"));
SUCCEED(); // Triggers OnTestPartResult.
}
TEST_F(ListenerTest, DoesBar) {
- g_events->PushBack(String("ListenerTest::* Test Body"));
+ g_events->push_back(String("ListenerTest::* Test Body"));
SUCCEED(); // Triggers OnTestPartResult.
}
@@ -186,7 +177,7 @@ TEST_F(ListenerTest, DoesBar) {
using ::testing::internal::EnvironmentInvocationCatcher;
using ::testing::internal::EventRecordingListener;
-void VerifyResults(const Vector<String>& data,
+void VerifyResults(const std::vector<String>& data,
const char* const* expected_data,
int expected_data_size) {
const int actual_size = data.size();
@@ -199,18 +190,18 @@ void VerifyResults(const Vector<String>& data,
expected_data_size : actual_size;
int i = 0;
for (; i < shorter_size; ++i) {
- ASSERT_STREQ(expected_data[i], data.GetElement(i).c_str())
+ ASSERT_STREQ(expected_data[i], data[i].c_str())
<< "at position " << i;
}
// Prints extra elements in the actual data.
for (; i < actual_size; ++i) {
- printf(" Actual event #%d: %s\n", i, data.GetElement(i).c_str());
+ printf(" Actual event #%d: %s\n", i, data[i].c_str());
}
}
int main(int argc, char **argv) {
- Vector<String> events;
+ std::vector<String> events;
g_events = &events;
InitGoogleTest(&argc, argv);
diff --git a/gtest/test/gtest-message_test.cc b/gtest/test/gtest-message_test.cc
index 6c43c33..e42b034 100644
--- a/gtest/test/gtest-message_test.cc
+++ b/gtest/test/gtest-message_test.cc
@@ -68,6 +68,23 @@ TEST(MessageTest, ConstructsFromCString) {
EXPECT_STREQ("Hello", ToCString(msg));
}
+// Tests streaming a float.
+TEST(MessageTest, StreamsFloat) {
+ const char* const s = ToCString(Message() << 1.23456F << " " << 2.34567F);
+ // Both numbers should be printed with enough precision.
+ EXPECT_PRED_FORMAT2(testing::IsSubstring, "1.234560", s);
+ EXPECT_PRED_FORMAT2(testing::IsSubstring, " 2.345669", s);
+}
+
+// Tests streaming a double.
+TEST(MessageTest, StreamsDouble) {
+ const char* const s = ToCString(Message() << 1260570880.4555497 << " "
+ << 1260572265.1954534);
+ // Both numbers should be printed with enough precision.
+ EXPECT_PRED_FORMAT2(testing::IsSubstring, "1260570880.45", s);
+ EXPECT_PRED_FORMAT2(testing::IsSubstring, " 1260572265.19", s);
+}
+
// Tests streaming a non-char pointer.
TEST(MessageTest, StreamsPointer) {
int n = 0;
@@ -92,12 +109,7 @@ TEST(MessageTest, StreamsNullCString) {
EXPECT_STREQ("(null)", ToCString(Message() << p));
}
-#if GTEST_HAS_STD_STRING
-
// Tests streaming std::string.
-//
-// As std::string has problem in MSVC when exception is disabled, we only
-// test this where std::string can be used.
TEST(MessageTest, StreamsString) {
const ::std::string str("Hello");
EXPECT_STREQ("Hello", ToCString(Message() << str));
@@ -113,8 +125,6 @@ TEST(MessageTest, StreamsStringWithEmbeddedNUL) {
ToCString(Message() << string_with_nul));
}
-#endif // GTEST_HAS_STD_STRING
-
// Tests streaming a NUL char.
TEST(MessageTest, StreamsNULChar) {
EXPECT_STREQ("\\0", ToCString(Message() << '\0'));
diff --git a/gtest/test/gtest-options_test.cc b/gtest/test/gtest-options_test.cc
index 31ae327..2e2cbc9 100644
--- a/gtest/test/gtest-options_test.cc
+++ b/gtest/test/gtest-options_test.cc
@@ -89,61 +89,38 @@ TEST(XmlOutputTest, GetOutputFileSingleFile) {
}
TEST(XmlOutputTest, GetOutputFileFromDirectoryPath) {
-#if GTEST_OS_WINDOWS
- GTEST_FLAG(output) = "xml:path\\";
+ GTEST_FLAG(output) = "xml:path" GTEST_PATH_SEP_;
+ const std::string expected_output_file =
+ GetAbsolutePathOf(
+ FilePath(std::string("path") + GTEST_PATH_SEP_ +
+ GetCurrentExecutableName().c_str() + ".xml")).c_str();
const String& output_file = UnitTestOptions::GetAbsolutePathToOutputFile();
- EXPECT_TRUE(
- _strcmpi(output_file.c_str(),
- GetAbsolutePathOf(
- FilePath("path\\gtest-options_test.xml")).c_str()) == 0 ||
- _strcmpi(output_file.c_str(),
- GetAbsolutePathOf(
- FilePath("path\\gtest-options-ex_test.xml")).c_str()) == 0 ||
- _strcmpi(output_file.c_str(),
- GetAbsolutePathOf(
- FilePath("path\\gtest_all_test.xml")).c_str()) == 0)
- << " output_file = " << output_file;
+#if GTEST_OS_WINDOWS
+ EXPECT_STRCASEEQ(expected_output_file.c_str(), output_file.c_str());
#else
- GTEST_FLAG(output) = "xml:path/";
- const String& output_file = UnitTestOptions::GetAbsolutePathToOutputFile();
- // TODO(wan@google.com): libtool causes the test binary file to be
- // named lt-gtest-options_test. Therefore the output file may be
- // named .../lt-gtest-options_test.xml. We should remove this
- // hard-coded logic when Chandler Carruth's libtool replacement is
- // ready.
- EXPECT_TRUE(output_file ==
- GetAbsolutePathOf(
- FilePath("path/gtest-options_test.xml")).c_str() ||
- output_file ==
- GetAbsolutePathOf(
- FilePath("path/lt-gtest-options_test.xml")).c_str() ||
- output_file ==
- GetAbsolutePathOf(
- FilePath("path/gtest_all_test.xml")).c_str() ||
- output_file ==
- GetAbsolutePathOf(
- FilePath("path/lt-gtest_all_test.xml")).c_str())
- << " output_file = " << output_file;
+ EXPECT_EQ(expected_output_file, output_file.c_str());
#endif
}
TEST(OutputFileHelpersTest, GetCurrentExecutableName) {
- const FilePath executable = GetCurrentExecutableName();
- const char* const exe_str = executable.c_str();
+ const std::string exe_str = GetCurrentExecutableName().c_str();
#if GTEST_OS_WINDOWS
- ASSERT_TRUE(_strcmpi("gtest-options_test", exe_str) == 0 ||
- _strcmpi("gtest-options-ex_test", exe_str) == 0 ||
- _strcmpi("gtest_all_test", exe_str) == 0)
- << "GetCurrentExecutableName() returns " << exe_str;
+ const bool success =
+ _strcmpi("gtest-options_test", exe_str.c_str()) == 0 ||
+ _strcmpi("gtest-options-ex_test", exe_str.c_str()) == 0 ||
+ _strcmpi("gtest_all_test", exe_str.c_str()) == 0 ||
+ _strcmpi("gtest_dll_test", exe_str.c_str()) == 0;
#else
// TODO(wan@google.com): remove the hard-coded "lt-" prefix when
// Chandler Carruth's libtool replacement is ready.
- EXPECT_TRUE(String(exe_str) == "gtest-options_test" ||
- String(exe_str) == "lt-gtest-options_test" ||
- String(exe_str) == "gtest_all_test" ||
- String(exe_str) == "lt-gtest_all_test")
- << "GetCurrentExecutableName() returns " << exe_str;
+ const bool success =
+ exe_str == "gtest-options_test" ||
+ exe_str == "gtest_all_test" ||
+ exe_str == "lt-gtest_all_test" ||
+ exe_str == "gtest_dll_test";
#endif // GTEST_OS_WINDOWS
+ if (!success)
+ FAIL() << "GetCurrentExecutableName() returns " << exe_str;
}
class XmlOutputChangeDirTest : public Test {
@@ -185,40 +162,17 @@ TEST_F(XmlOutputChangeDirTest, PreserveOriginalWorkingDirWithRelativeFile) {
}
TEST_F(XmlOutputChangeDirTest, PreserveOriginalWorkingDirWithRelativePath) {
-#if GTEST_OS_WINDOWS
- GTEST_FLAG(output) = "xml:path\\";
+ GTEST_FLAG(output) = "xml:path" GTEST_PATH_SEP_;
+ const std::string expected_output_file =
+ FilePath::ConcatPaths(
+ original_working_dir_,
+ FilePath(std::string("path") + GTEST_PATH_SEP_ +
+ GetCurrentExecutableName().c_str() + ".xml")).c_str();
const String& output_file = UnitTestOptions::GetAbsolutePathToOutputFile();
- EXPECT_TRUE(
- _strcmpi(output_file.c_str(),
- FilePath::ConcatPaths(
- original_working_dir_,
- FilePath("path\\gtest-options_test.xml")).c_str()) == 0 ||
- _strcmpi(output_file.c_str(),
- FilePath::ConcatPaths(
- original_working_dir_,
- FilePath("path\\gtest-options-ex_test.xml")).c_str()) == 0 ||
- _strcmpi(output_file.c_str(),
- FilePath::ConcatPaths(
- original_working_dir_,
- FilePath("path\\gtest_all_test.xml")).c_str()) == 0)
- << " output_file = " << output_file;
+#if GTEST_OS_WINDOWS
+ EXPECT_STRCASEEQ(expected_output_file.c_str(), output_file.c_str());
#else
- GTEST_FLAG(output) = "xml:path/";
- const String& output_file = UnitTestOptions::GetAbsolutePathToOutputFile();
- // TODO(wan@google.com): libtool causes the test binary file to be
- // named lt-gtest-options_test. Therefore the output file may be
- // named .../lt-gtest-options_test.xml. We should remove this
- // hard-coded logic when Chandler Carruth's libtool replacement is
- // ready.
- EXPECT_TRUE(output_file == FilePath::ConcatPaths(original_working_dir_,
- FilePath("path/gtest-options_test.xml")).c_str() ||
- output_file == FilePath::ConcatPaths(original_working_dir_,
- FilePath("path/lt-gtest-options_test.xml")).c_str() ||
- output_file == FilePath::ConcatPaths(original_working_dir_,
- FilePath("path/gtest_all_test.xml")).c_str() ||
- output_file == FilePath::ConcatPaths(original_working_dir_,
- FilePath("path/lt-gtest_all_test.xml")).c_str())
- << " output_file = " << output_file;
+ EXPECT_EQ(expected_output_file, output_file.c_str());
#endif
}
@@ -236,29 +190,20 @@ TEST_F(XmlOutputChangeDirTest, PreserveOriginalWorkingDirWithAbsoluteFile) {
TEST_F(XmlOutputChangeDirTest, PreserveOriginalWorkingDirWithAbsolutePath) {
#if GTEST_OS_WINDOWS
- GTEST_FLAG(output) = "xml:c:\\tmp\\";
- const String& output_file = UnitTestOptions::GetAbsolutePathToOutputFile();
- EXPECT_TRUE(
- _strcmpi(output_file.c_str(),
- FilePath("c:\\tmp\\gtest-options_test.xml").c_str()) == 0 ||
- _strcmpi(output_file.c_str(),
- FilePath("c:\\tmp\\gtest-options-ex_test.xml").c_str()) == 0 ||
- _strcmpi(output_file.c_str(),
- FilePath("c:\\tmp\\gtest_all_test.xml").c_str()) == 0)
- << " output_file = " << output_file;
+ const std::string path = "c:\\tmp\\";
#else
- GTEST_FLAG(output) = "xml:/tmp/";
+ const std::string path = "/tmp/";
+#endif
+
+ GTEST_FLAG(output) = "xml:" + path;
+ const std::string expected_output_file =
+ path + GetCurrentExecutableName().c_str() + ".xml";
const String& output_file = UnitTestOptions::GetAbsolutePathToOutputFile();
- // TODO(wan@google.com): libtool causes the test binary file to be
- // named lt-gtest-options_test. Therefore the output file may be
- // named .../lt-gtest-options_test.xml. We should remove this
- // hard-coded logic when Chandler Carruth's libtool replacement is
- // ready.
- EXPECT_TRUE(output_file == "/tmp/gtest-options_test.xml" ||
- output_file == "/tmp/lt-gtest-options_test.xml" ||
- output_file == "/tmp/gtest_all_test.xml" ||
- output_file == "/tmp/lt-gtest_all_test.xml")
- << " output_file = " << output_file;
+
+#if GTEST_OS_WINDOWS
+ EXPECT_STRCASEEQ(expected_output_file.c_str(), output_file.c_str());
+#else
+ EXPECT_EQ(expected_output_file, output_file.c_str());
#endif
}
diff --git a/gtest/test/gtest-param-test_test.cc b/gtest/test/gtest-param-test_test.cc
index ecb5fdb..d0a0e73 100644
--- a/gtest/test/gtest-param-test_test.cc
+++ b/gtest/test/gtest-param-test_test.cc
@@ -40,6 +40,8 @@
#include <algorithm>
#include <iostream>
#include <list>
+#include <sstream>
+#include <string>
#include <vector>
// To include gtest-internal-inl.h.
@@ -70,6 +72,57 @@ using ::std::tr1::tuple;
using ::testing::internal::ParamGenerator;
using ::testing::internal::UnitTestOptions;
+// Prints a value to a string.
+//
+// TODO(wan@google.com): remove PrintValue() when we move matchers and
+// EXPECT_THAT() from Google Mock to Google Test. At that time, we
+// can write EXPECT_THAT(x, Eq(y)) to compare two tuples x and y, as
+// EXPECT_THAT() and the matchers know how to print tuples.
+template <typename T>
+::std::string PrintValue(const T& value) {
+ ::std::stringstream stream;
+ stream << value;
+ return stream.str();
+}
+
+#if GTEST_HAS_COMBINE
+
+// These overloads allow printing tuples in our tests. We cannot
+// define an operator<< for tuples, as that definition needs to be in
+// the std namespace in order to be picked up by Google Test via
+// Argument-Dependent Lookup, yet defining anything in the std
+// namespace in non-STL code is undefined behavior.
+
+template <typename T1, typename T2>
+::std::string PrintValue(const tuple<T1, T2>& value) {
+ ::std::stringstream stream;
+ stream << "(" << get<0>(value) << ", " << get<1>(value) << ")";
+ return stream.str();
+}
+
+template <typename T1, typename T2, typename T3>
+::std::string PrintValue(const tuple<T1, T2, T3>& value) {
+ ::std::stringstream stream;
+ stream << "(" << get<0>(value) << ", " << get<1>(value)
+ << ", "<< get<2>(value) << ")";
+ return stream.str();
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+::std::string PrintValue(
+ const tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>& value) {
+ ::std::stringstream stream;
+ stream << "(" << get<0>(value) << ", " << get<1>(value)
+ << ", "<< get<2>(value) << ", " << get<3>(value)
+ << ", "<< get<4>(value) << ", " << get<5>(value)
+ << ", "<< get<6>(value) << ", " << get<7>(value)
+ << ", "<< get<8>(value) << ", " << get<9>(value) << ")";
+ return stream.str();
+}
+
+#endif // GTEST_HAS_COMBINE
+
// Verifies that a sequence generated by the generator and accessed
// via the iterator object matches the expected one using Google Test
// assertions.
@@ -80,15 +133,19 @@ void VerifyGenerator(const ParamGenerator<T>& generator,
for (size_t i = 0; i < N; ++i) {
ASSERT_FALSE(it == generator.end())
<< "At element " << i << " when accessing via an iterator "
- << "created with the copy constructor." << std::endl;
- EXPECT_EQ(expected_values[i], *it)
- << "At element " << i << " when accessing via an iterator "
- << "created with the copy constructor." << std::endl;
+ << "created with the copy constructor.\n";
+ // We cannot use EXPECT_EQ() here as the values may be tuples,
+ // which don't support <<.
+ EXPECT_TRUE(expected_values[i] == *it)
+ << "where i is " << i
+ << ", expected_values[i] is " << PrintValue(expected_values[i])
+ << ", *it is " << PrintValue(*it)
+ << ", and 'it' is an iterator created with the copy constructor.\n";
it++;
}
EXPECT_TRUE(it == generator.end())
<< "At the presumed end of sequence when accessing via an iterator "
- << "created with the copy constructor." << std::endl;
+ << "created with the copy constructor.\n";
// Test the iterator assignment. The following lines verify that
// the sequence accessed via an iterator initialized via the
@@ -98,15 +155,17 @@ void VerifyGenerator(const ParamGenerator<T>& generator,
for (size_t i = 0; i < N; ++i) {
ASSERT_FALSE(it == generator.end())
<< "At element " << i << " when accessing via an iterator "
- << "created with the assignment operator." << std::endl;
- EXPECT_EQ(expected_values[i], *it)
- << "At element " << i << " when accessing via an iterator "
- << "created with the assignment operator." << std::endl;
+ << "created with the assignment operator.\n";
+ EXPECT_TRUE(expected_values[i] == *it)
+ << "where i is " << i
+ << ", expected_values[i] is " << PrintValue(expected_values[i])
+ << ", *it is " << PrintValue(*it)
+ << ", and 'it' is an iterator created with the copy constructor.\n";
it++;
}
EXPECT_TRUE(it == generator.end())
<< "At the presumed end of sequence when accessing via an iterator "
- << "created with the assignment operator." << std::endl;
+ << "created with the assignment operator.\n";
}
template <typename T>
@@ -205,7 +264,7 @@ TEST(RangeTest, IntRangeWithCustomStepOverUpperBound) {
// copy constructor, operator=(), operator+(), and operator<().
class DogAdder {
public:
- explicit DogAdder(const char* value) : value_(value) {}
+ explicit DogAdder(const char* a_value) : value_(a_value) {}
DogAdder(const DogAdder& other) : value_(other.value_.c_str()) {}
DogAdder operator=(const DogAdder& other) {
@@ -243,7 +302,7 @@ TEST(RangeTest, WorksWithACustomType) {
class IntWrapper {
public:
- explicit IntWrapper(int value) : value_(value) {}
+ explicit IntWrapper(int a_value) : value_(a_value) {}
IntWrapper(const IntWrapper& other) : value_(other.value_) {}
IntWrapper operator=(const IntWrapper& other) {
@@ -400,33 +459,6 @@ TEST(BoolTest, BoolWorks) {
#if GTEST_HAS_COMBINE
-template <typename T1, typename T2>
-::std::ostream& operator<<(::std::ostream& stream, const tuple<T1, T2>& value) {
- stream << "(" << get<0>(value) << ", " << get<1>(value) << ")";
- return stream;
-}
-
-template <typename T1, typename T2, typename T3>
-::std::ostream& operator<<(::std::ostream& stream,
- const tuple<T1, T2, T3>& value) {
- stream << "(" << get<0>(value) << ", " << get<1>(value)
- << ", "<< get<2>(value) << ")";
- return stream;
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10>
-::std::ostream& operator<<(
- ::std::ostream& stream,
- const tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>& value) {
- stream << "(" << get<0>(value) << ", " << get<1>(value)
- << ", "<< get<2>(value) << ", " << get<3>(value)
- << ", "<< get<4>(value) << ", " << get<5>(value)
- << ", "<< get<6>(value) << ", " << get<7>(value)
- << ", "<< get<8>(value) << ", " << get<9>(value) << ")";
- return stream;
-}
-
// Tests that Combine() with two parameters generates the expected sequence.
TEST(CombineTest, CombineWithTwoParameters) {
const char* foo = "foo";
@@ -660,13 +692,15 @@ INSTANTIATE_TEST_CASE_P(TestExpansionModule, TestGenerationTest,
ValuesIn(test_generation_params));
// This test verifies that the element sequence (third parameter of
-// INSTANTIATE_TEST_CASE_P) is evaluated in RUN_ALL_TESTS and not at the call
-// site of INSTANTIATE_TEST_CASE_P.
-// For that, we declare param_value_ to be a static member of
-// GeneratorEvaluationTest and initialize it to 0. We set it to 1 in main(),
-// just before invocation of RUN_ALL_TESTS. If the sequence is evaluated
-// before that moment, INSTANTIATE_TEST_CASE_P will create a test with
-// parameter 0, and the test body will fail the assertion.
+// INSTANTIATE_TEST_CASE_P) is evaluated in InitGoogleTest() and neither at
+// the call site of INSTANTIATE_TEST_CASE_P nor in RUN_ALL_TESTS(). For
+// that, we declare param_value_ to be a static member of
+// GeneratorEvaluationTest and initialize it to 0. We set it to 1 in
+// main(), just before invocation of InitGoogleTest(). After calling
+// InitGoogleTest(), we set the value to 2. If the sequence is evaluated
+// before or after InitGoogleTest, INSTANTIATE_TEST_CASE_P will create a
+// test with parameter other than 1, and the test body will fail the
+// assertion.
class GeneratorEvaluationTest : public TestWithParam<int> {
public:
static int param_value() { return param_value_; }
@@ -783,10 +817,19 @@ int main(int argc, char **argv) {
#if GTEST_HAS_PARAM_TEST
// Used in TestGenerationTest test case.
AddGlobalTestEnvironment(TestGenerationTest::Environment::Instance());
- // Used in GeneratorEvaluationTest test case.
+ // Used in GeneratorEvaluationTest test case. Tests that the updated value
+ // will be picked up for instantiating tests in GeneratorEvaluationTest.
GeneratorEvaluationTest::set_param_value(1);
#endif // GTEST_HAS_PARAM_TEST
::testing::InitGoogleTest(&argc, argv);
+
+#if GTEST_HAS_PARAM_TEST
+ // Used in GeneratorEvaluationTest test case. Tests that value updated
+ // here will NOT be used for instantiating tests in
+ // GeneratorEvaluationTest.
+ GeneratorEvaluationTest::set_param_value(2);
+#endif // GTEST_HAS_PARAM_TEST
+
return RUN_ALL_TESTS();
}
diff --git a/gtest/test/gtest-port_test.cc b/gtest/test/gtest-port_test.cc
index df59f9e..3725860 100644
--- a/gtest/test/gtest-port_test.cc
+++ b/gtest/test/gtest-port_test.cc
@@ -33,11 +33,14 @@
#include <gtest/internal/gtest-port.h>
+#include <stdio.h>
+
#if GTEST_OS_MAC
-#include <pthread.h>
#include <time.h>
#endif // GTEST_OS_MAC
+#include <utility> // For std::pair and std::make_pair.
+
#include <gtest/gtest.h>
#include <gtest/gtest-spi.h>
@@ -50,9 +53,20 @@
#include "src/gtest-internal-inl.h"
#undef GTEST_IMPLEMENTATION_
+using std::make_pair;
+using std::pair;
+
namespace testing {
namespace internal {
+// Tests that the element_type typedef is available in scoped_ptr and refers
+// to the parameter type.
+TEST(ScopedPtrTest, DefinesElementType) {
+ StaticAssertTypeEq<int, ::testing::internal::scoped_ptr<int>::element_type>();
+}
+
+// TODO(vladl@google.com): Implement THE REST of scoped_ptr tests.
+
TEST(GtestCheckSyntaxTest, BehavesLikeASingleStatement) {
if (AlwaysFalse())
GTEST_CHECK_(false) << "This should never be executed; "
@@ -84,7 +98,7 @@ TEST(GtestCheckSyntaxTest, WorksWithSwitch) {
#if GTEST_OS_MAC
void* ThreadFunc(void* data) {
- pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(data);
+ pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data);
pthread_mutex_lock(mutex);
pthread_mutex_unlock(mutex);
return NULL;
@@ -119,10 +133,7 @@ TEST(GetThreadCountTest, ReturnsCorrectValue) {
if (GetThreadCount() == 1)
break;
- timespec time;
- time.tv_sec = 0;
- time.tv_nsec = 100L * 1000 * 1000; // .1 seconds.
- nanosleep(&time, NULL);
+ SleepMilliseconds(100);
}
EXPECT_EQ(1U, GetThreadCount());
pthread_mutex_destroy(&mutex);
@@ -161,15 +172,15 @@ TEST(GtestCheckDeathTest, LivesSilentlyOnSuccess) {
#if GTEST_USES_POSIX_RE
+#if GTEST_HAS_TYPED_TEST
+
template <typename Str>
class RETest : public ::testing::Test {};
// Defines StringTypes as the list of all string types that class RE
// supports.
typedef testing::Types<
-#if GTEST_HAS_STD_STRING
::std::string,
-#endif // GTEST_HAS_STD_STRING
#if GTEST_HAS_GLOBAL_STRING
::string,
#endif // GTEST_HAS_GLOBAL_STRING
@@ -223,6 +234,8 @@ TYPED_TEST(RETest, PartialMatchWorks) {
EXPECT_FALSE(RE::PartialMatch(TypeParam("zza"), re));
}
+#endif // GTEST_HAS_TYPED_TEST
+
#elif GTEST_USES_SIMPLE_RE
TEST(IsInSetTest, NulCharIsNotInAnySet) {
@@ -689,11 +702,317 @@ TEST(RETest, PartialMatchWorks) {
#endif // GTEST_USES_POSIX_RE
-TEST(CaptureStderrTest, CapturesStdErr) {
+#if !GTEST_OS_WINDOWS_MOBILE
+
+TEST(CaptureTest, CapturesStdout) {
+ CaptureStdout();
+ fprintf(stdout, "abc");
+ EXPECT_STREQ("abc", GetCapturedStdout().c_str());
+
+ CaptureStdout();
+ fprintf(stdout, "def%cghi", '\0');
+ EXPECT_EQ(::std::string("def\0ghi", 7), ::std::string(GetCapturedStdout()));
+}
+
+TEST(CaptureTest, CapturesStderr) {
+ CaptureStderr();
+ fprintf(stderr, "jkl");
+ EXPECT_STREQ("jkl", GetCapturedStderr().c_str());
+
+ CaptureStderr();
+ fprintf(stderr, "jkl%cmno", '\0');
+ EXPECT_EQ(::std::string("jkl\0mno", 7), ::std::string(GetCapturedStderr()));
+}
+
+// Tests that stdout and stderr capture don't interfere with each other.
+TEST(CaptureTest, CapturesStdoutAndStderr) {
+ CaptureStdout();
CaptureStderr();
- fprintf(stderr, "abc");
- ASSERT_STREQ("abc", GetCapturedStderr().c_str());
+ fprintf(stdout, "pqr");
+ fprintf(stderr, "stu");
+ EXPECT_STREQ("pqr", GetCapturedStdout().c_str());
+ EXPECT_STREQ("stu", GetCapturedStderr().c_str());
+}
+
+TEST(CaptureDeathTest, CannotReenterStdoutCapture) {
+ CaptureStdout();
+ EXPECT_DEATH_IF_SUPPORTED(CaptureStdout();,
+ "Only one stdout capturer can exist at a time");
+ GetCapturedStdout();
+
+ // We cannot test stderr capturing using death tests as they use it
+ // themselves.
+}
+
+#endif // !GTEST_OS_WINDOWS_MOBILE
+
+TEST(ThreadLocalTest, DefaultConstructorInitializesToDefaultValues) {
+ ThreadLocal<int> t1;
+ EXPECT_EQ(0, t1.get());
+
+ ThreadLocal<void*> t2;
+ EXPECT_TRUE(t2.get() == NULL);
+}
+
+TEST(ThreadLocalTest, SingleParamConstructorInitializesToParam) {
+ ThreadLocal<int> t1(123);
+ EXPECT_EQ(123, t1.get());
+
+ int i = 0;
+ ThreadLocal<int*> t2(&i);
+ EXPECT_EQ(&i, t2.get());
+}
+
+class NoDefaultContructor {
+ public:
+ explicit NoDefaultContructor(const char*) {}
+ NoDefaultContructor(const NoDefaultContructor&) {}
+};
+
+TEST(ThreadLocalTest, ValueDefaultContructorIsNotRequiredForParamVersion) {
+ ThreadLocal<NoDefaultContructor> bar(NoDefaultContructor("foo"));
+ bar.pointer();
+}
+
+TEST(ThreadLocalTest, GetAndPointerReturnSameValue) {
+ ThreadLocal<String> thread_local;
+
+ EXPECT_EQ(thread_local.pointer(), &(thread_local.get()));
+
+ // Verifies the condition still holds after calling set.
+ thread_local.set("foo");
+ EXPECT_EQ(thread_local.pointer(), &(thread_local.get()));
+}
+
+TEST(ThreadLocalTest, PointerAndConstPointerReturnSameValue) {
+ ThreadLocal<String> thread_local;
+ const ThreadLocal<String>& const_thread_local = thread_local;
+
+ EXPECT_EQ(thread_local.pointer(), const_thread_local.pointer());
+
+ thread_local.set("foo");
+ EXPECT_EQ(thread_local.pointer(), const_thread_local.pointer());
+}
+
+#if GTEST_IS_THREADSAFE
+
+void AddTwo(int* param) { *param += 2; }
+
+TEST(ThreadWithParamTest, ConstructorExecutesThreadFunc) {
+ int i = 40;
+ ThreadWithParam<int*> thread(&AddTwo, &i, NULL);
+ thread.Join();
+ EXPECT_EQ(42, i);
}
+TEST(MutexDeathTest, AssertHeldShouldAssertWhenNotLocked) {
+ // AssertHeld() is flaky only in the presence of multiple threads accessing
+ // the lock. In this case, the test is robust.
+ EXPECT_DEATH_IF_SUPPORTED({
+ Mutex m;
+ { MutexLock lock(&m); }
+ m.AssertHeld();
+ },
+ "thread .*hold");
+}
+
+TEST(MutexTest, AssertHeldShouldNotAssertWhenLocked) {
+ Mutex m;
+ MutexLock lock(&m);
+ m.AssertHeld();
+}
+
+class AtomicCounterWithMutex {
+ public:
+ explicit AtomicCounterWithMutex(Mutex* mutex) :
+ value_(0), mutex_(mutex), random_(42) {}
+
+ void Increment() {
+ MutexLock lock(mutex_);
+ int temp = value_;
+ {
+ // Locking a mutex puts up a memory barrier, preventing reads and
+ // writes to value_ rearranged when observed from other threads.
+ //
+ // We cannot use Mutex and MutexLock here or rely on their memory
+ // barrier functionality as we are testing them here.
+ pthread_mutex_t memory_barrier_mutex;
+ GTEST_CHECK_POSIX_SUCCESS_(
+ pthread_mutex_init(&memory_barrier_mutex, NULL));
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&memory_barrier_mutex));
+
+ SleepMilliseconds(random_.Generate(30));
+
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&memory_barrier_mutex));
+ }
+ value_ = temp + 1;
+ }
+ int value() const { return value_; }
+
+ private:
+ volatile int value_;
+ Mutex* const mutex_; // Protects value_.
+ Random random_;
+};
+
+void CountingThreadFunc(pair<AtomicCounterWithMutex*, int> param) {
+ for (int i = 0; i < param.second; ++i)
+ param.first->Increment();
+}
+
+// Tests that the mutex only lets one thread at a time to lock it.
+TEST(MutexTest, OnlyOneThreadCanLockAtATime) {
+ Mutex mutex;
+ AtomicCounterWithMutex locked_counter(&mutex);
+
+ typedef ThreadWithParam<pair<AtomicCounterWithMutex*, int> > ThreadType;
+ const int kCycleCount = 20;
+ const int kThreadCount = 7;
+ scoped_ptr<ThreadType> counting_threads[kThreadCount];
+ Notification threads_can_start;
+ // Creates and runs kThreadCount threads that increment locked_counter
+ // kCycleCount times each.
+ for (int i = 0; i < kThreadCount; ++i) {
+ counting_threads[i].reset(new ThreadType(&CountingThreadFunc,
+ make_pair(&locked_counter,
+ kCycleCount),
+ &threads_can_start));
+ }
+ threads_can_start.Notify();
+ for (int i = 0; i < kThreadCount; ++i)
+ counting_threads[i]->Join();
+
+ // If the mutex lets more than one thread to increment the counter at a
+ // time, they are likely to encounter a race condition and have some
+ // increments overwritten, resulting in the lower then expected counter
+ // value.
+ EXPECT_EQ(kCycleCount * kThreadCount, locked_counter.value());
+}
+
+template <typename T>
+void RunFromThread(void (func)(T), T param) {
+ ThreadWithParam<T> thread(func, param, NULL);
+ thread.Join();
+}
+
+void RetrieveThreadLocalValue(pair<ThreadLocal<String>*, String*> param) {
+ *param.second = param.first->get();
+}
+
+TEST(ThreadLocalTest, ParameterizedConstructorSetsDefault) {
+ ThreadLocal<String> thread_local("foo");
+ EXPECT_STREQ("foo", thread_local.get().c_str());
+
+ thread_local.set("bar");
+ EXPECT_STREQ("bar", thread_local.get().c_str());
+
+ String result;
+ RunFromThread(&RetrieveThreadLocalValue, make_pair(&thread_local, &result));
+ EXPECT_STREQ("foo", result.c_str());
+}
+
+// DestructorTracker keeps track of whether its instances have been
+// destroyed.
+static std::vector<bool> g_destroyed;
+
+class DestructorTracker {
+ public:
+ DestructorTracker() : index_(GetNewIndex()) {}
+ DestructorTracker(const DestructorTracker& /* rhs */)
+ : index_(GetNewIndex()) {}
+ ~DestructorTracker() {
+ // We never access g_destroyed concurrently, so we don't need to
+ // protect the write operation under a mutex.
+ g_destroyed[index_] = true;
+ }
+
+ private:
+ static int GetNewIndex() {
+ g_destroyed.push_back(false);
+ return g_destroyed.size() - 1;
+ }
+ const int index_;
+};
+
+typedef ThreadLocal<DestructorTracker>* ThreadParam;
+
+void CallThreadLocalGet(ThreadParam thread_local) {
+ thread_local->get();
+}
+
+// Tests that when a ThreadLocal object dies in a thread, it destroys
+// the managed object for that thread.
+TEST(ThreadLocalTest, DestroysManagedObjectForOwnThreadWhenDying) {
+ g_destroyed.clear();
+
+ {
+ // The next line default constructs a DestructorTracker object as
+ // the default value of objects managed by thread_local.
+ ThreadLocal<DestructorTracker> thread_local;
+ ASSERT_EQ(1U, g_destroyed.size());
+ ASSERT_FALSE(g_destroyed[0]);
+
+ // This creates another DestructorTracker object for the main thread.
+ thread_local.get();
+ ASSERT_EQ(2U, g_destroyed.size());
+ ASSERT_FALSE(g_destroyed[0]);
+ ASSERT_FALSE(g_destroyed[1]);
+ }
+
+ // Now thread_local has died. It should have destroyed both the
+ // default value shared by all threads and the value for the main
+ // thread.
+ ASSERT_EQ(2U, g_destroyed.size());
+ EXPECT_TRUE(g_destroyed[0]);
+ EXPECT_TRUE(g_destroyed[1]);
+
+ g_destroyed.clear();
+}
+
+// Tests that when a thread exits, the thread-local object for that
+// thread is destroyed.
+TEST(ThreadLocalTest, DestroysManagedObjectAtThreadExit) {
+ g_destroyed.clear();
+
+ {
+ // The next line default constructs a DestructorTracker object as
+ // the default value of objects managed by thread_local.
+ ThreadLocal<DestructorTracker> thread_local;
+ ASSERT_EQ(1U, g_destroyed.size());
+ ASSERT_FALSE(g_destroyed[0]);
+
+ // This creates another DestructorTracker object in the new thread.
+ ThreadWithParam<ThreadParam> thread(
+ &CallThreadLocalGet, &thread_local, NULL);
+ thread.Join();
+
+ // Now the new thread has exited. The per-thread object for it
+ // should have been destroyed.
+ ASSERT_EQ(2U, g_destroyed.size());
+ ASSERT_FALSE(g_destroyed[0]);
+ ASSERT_TRUE(g_destroyed[1]);
+ }
+
+ // Now thread_local has died. The default value should have been
+ // destroyed too.
+ ASSERT_EQ(2U, g_destroyed.size());
+ EXPECT_TRUE(g_destroyed[0]);
+ EXPECT_TRUE(g_destroyed[1]);
+
+ g_destroyed.clear();
+}
+
+TEST(ThreadLocalTest, ThreadLocalMutationsAffectOnlyCurrentThread) {
+ ThreadLocal<String> thread_local;
+ thread_local.set("Foo");
+ EXPECT_STREQ("Foo", thread_local.get().c_str());
+
+ String result;
+ RunFromThread(&RetrieveThreadLocalValue, make_pair(&thread_local, &result));
+ EXPECT_TRUE(result.c_str() == NULL);
+}
+
+#endif // GTEST_IS_THREADSAFE
+
} // namespace internal
} // namespace testing
diff --git a/gtest/test/gtest-test-part_test.cc b/gtest/test/gtest-test-part_test.cc
index 403c184..5a3e919 100644
--- a/gtest/test/gtest-test-part_test.cc
+++ b/gtest/test/gtest-test-part_test.cc
@@ -34,6 +34,7 @@
#include <gtest/gtest.h>
+using testing::Message;
using testing::Test;
using testing::TestPartResult;
using testing::TestPartResultArray;
@@ -53,6 +54,54 @@ class TestPartResultTest : public Test {
TestPartResult r1_, r2_, r3_;
};
+
+TEST_F(TestPartResultTest, ConstructorWorks) {
+ Message message;
+ message << "something is terribly wrong";
+ message << static_cast<const char*>(testing::internal::kStackTraceMarker);
+ message << "some unimportant stack trace";
+
+ const TestPartResult result(TestPartResult::kNonFatalFailure,
+ "some_file.cc",
+ 42,
+ message.GetString().c_str());
+
+ EXPECT_EQ(TestPartResult::kNonFatalFailure, result.type());
+ EXPECT_STREQ("some_file.cc", result.file_name());
+ EXPECT_EQ(42, result.line_number());
+ EXPECT_STREQ(message.GetString().c_str(), result.message());
+ EXPECT_STREQ("something is terribly wrong", result.summary());
+}
+
+TEST_F(TestPartResultTest, ResultAccessorsWork) {
+ const TestPartResult success(TestPartResult::kSuccess,
+ "file.cc",
+ 42,
+ "message");
+ EXPECT_TRUE(success.passed());
+ EXPECT_FALSE(success.failed());
+ EXPECT_FALSE(success.nonfatally_failed());
+ EXPECT_FALSE(success.fatally_failed());
+
+ const TestPartResult nonfatal_failure(TestPartResult::kNonFatalFailure,
+ "file.cc",
+ 42,
+ "message");
+ EXPECT_FALSE(nonfatal_failure.passed());
+ EXPECT_TRUE(nonfatal_failure.failed());
+ EXPECT_TRUE(nonfatal_failure.nonfatally_failed());
+ EXPECT_FALSE(nonfatal_failure.fatally_failed());
+
+ const TestPartResult fatal_failure(TestPartResult::kFatalFailure,
+ "file.cc",
+ 42,
+ "message");
+ EXPECT_FALSE(fatal_failure.passed());
+ EXPECT_TRUE(fatal_failure.failed());
+ EXPECT_FALSE(fatal_failure.nonfatally_failed());
+ EXPECT_TRUE(fatal_failure.fatally_failed());
+}
+
// Tests TestPartResult::type().
TEST_F(TestPartResultTest, type) {
EXPECT_EQ(TestPartResult::kSuccess, r1_.type());
diff --git a/gtest/test/gtest-tuple_test.cc b/gtest/test/gtest-tuple_test.cc
index 3829118..532f70b 100644
--- a/gtest/test/gtest-tuple_test.cc
+++ b/gtest/test/gtest-tuple_test.cc
@@ -135,12 +135,44 @@ TEST(ReferenceFieldTest, IsAliasOfReferencedVariable) {
<< "Changing a reference field should update the underlying variable.";
}
-// Tests tuple's default constructor.
-TEST(TupleConstructorTest, DefaultConstructor) {
- // We are just testing that the following compiles.
+// Tests that tuple's default constructor default initializes each field.
+// This test needs to compile without generating warnings.
+TEST(TupleConstructorTest, DefaultConstructorDefaultInitializesEachField) {
+ // The TR1 report requires that tuple's default constructor default
+ // initializes each field, even if it's a primitive type. If the
+ // implementation forgets to do this, this test will catch it by
+ // generating warnings about using uninitialized variables (assuming
+ // a decent compiler).
+
tuple<> empty;
- tuple<int> one_field;
- tuple<double, char, bool*> three_fields;
+
+ tuple<int> a1, b1;
+ b1 = a1;
+ EXPECT_EQ(0, get<0>(b1));
+
+ tuple<int, double> a2, b2;
+ b2 = a2;
+ EXPECT_EQ(0, get<0>(b2));
+ EXPECT_EQ(0.0, get<1>(b2));
+
+ tuple<double, char, bool*> a3, b3;
+ b3 = a3;
+ EXPECT_EQ(0.0, get<0>(b3));
+ EXPECT_EQ('\0', get<1>(b3));
+ EXPECT_TRUE(get<2>(b3) == NULL);
+
+ tuple<int, int, int, int, int, int, int, int, int, int> a10, b10;
+ b10 = a10;
+ EXPECT_EQ(0, get<0>(b10));
+ EXPECT_EQ(0, get<1>(b10));
+ EXPECT_EQ(0, get<2>(b10));
+ EXPECT_EQ(0, get<3>(b10));
+ EXPECT_EQ(0, get<4>(b10));
+ EXPECT_EQ(0, get<5>(b10));
+ EXPECT_EQ(0, get<6>(b10));
+ EXPECT_EQ(0, get<7>(b10));
+ EXPECT_EQ(0, get<8>(b10));
+ EXPECT_EQ(0, get<9>(b10));
}
// Tests constructing a tuple from its fields.
diff --git a/gtest/test/gtest-typed-test_test.cc b/gtest/test/gtest-typed-test_test.cc
index 4b6e971..f2c3972 100644
--- a/gtest/test/gtest-typed-test_test.cc
+++ b/gtest/test/gtest-typed-test_test.cc
@@ -349,12 +349,12 @@ INSTANTIATE_TYPED_TEST_CASE_P(My, NumericTest, NumericTypes);
#if !defined(GTEST_HAS_TYPED_TEST) && !defined(GTEST_HAS_TYPED_TEST_P)
-// Google Test doesn't support type-parameterized tests on some platforms
-// and compilers, such as MSVC 7.1. If we use conditional compilation to
-// compile out all code referring to the gtest_main library, MSVC linker
-// will not link that library at all and consequently complain about
-// missing entry point defined in that library (fatal error LNK1561:
-// entry point must be defined). This dummy test keeps gtest_main linked in.
+// Google Test may not support type-parameterized tests with some
+// compilers. If we use conditional compilation to compile out all
+// code referring to the gtest_main library, MSVC linker will not link
+// that library at all and consequently complain about missing entry
+// point defined in that library (fatal error LNK1561: entry point
+// must be defined). This dummy test keeps gtest_main linked in.
TEST(DummyTest, TypedTestsAreNotSupportedOnThisPlatform) {}
#endif // #if !defined(GTEST_HAS_TYPED_TEST) && !defined(GTEST_HAS_TYPED_TEST_P)
diff --git a/gtest/test/gtest_all_test.cc b/gtest/test/gtest_all_test.cc
index 955aa62..e1edb08 100644
--- a/gtest/test/gtest_all_test.cc
+++ b/gtest/test/gtest_all_test.cc
@@ -45,3 +45,4 @@
#include "test/gtest-typed-test2_test.cc"
#include "test/gtest_unittest.cc"
#include "test/production.cc"
+#include "src/gtest_main.cc"
diff --git a/gtest/test/gtest_break_on_failure_unittest.py b/gtest/test/gtest_break_on_failure_unittest.py
index 218d371..c819183 100755
--- a/gtest/test/gtest_break_on_failure_unittest.py
+++ b/gtest/test/gtest_break_on_failure_unittest.py
@@ -69,21 +69,24 @@ EXE_PATH = gtest_test_utils.GetTestExecutablePath(
# Utilities.
+environ = os.environ.copy()
+
+
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
if value is not None:
- os.environ[env_var] = value
- elif env_var in os.environ:
- del os.environ[env_var]
+ environ[env_var] = value
+ elif env_var in environ:
+ del environ[env_var]
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
- p = gtest_test_utils.Subprocess(command)
+ p = gtest_test_utils.Subprocess(command, env=environ)
if p.terminated_by_signal:
return 1
else:
diff --git a/gtest/test/gtest_break_on_failure_unittest_.cc b/gtest/test/gtest_break_on_failure_unittest_.cc
index 10a1203..d28d1d3 100644
--- a/gtest/test/gtest_break_on_failure_unittest_.cc
+++ b/gtest/test/gtest_break_on_failure_unittest_.cc
@@ -43,6 +43,7 @@
#if GTEST_OS_WINDOWS
#include <windows.h>
+#include <stdlib.h>
#endif
namespace {
@@ -52,6 +53,14 @@ TEST(Foo, Bar) {
EXPECT_EQ(2, 3);
}
+#if GTEST_HAS_SEH && !GTEST_OS_WINDOWS_MOBILE
+// On Windows Mobile global exception handlers are not supported.
+LONG WINAPI ExitWithExceptionCode(
+ struct _EXCEPTION_POINTERS* exception_pointers) {
+ exit(exception_pointers->ExceptionRecord->ExceptionCode);
+}
+#endif
+
} // namespace
int main(int argc, char **argv) {
@@ -59,7 +68,18 @@ int main(int argc, char **argv) {
// Suppresses display of the Windows error dialog upon encountering
// a general protection fault (segment violation).
SetErrorMode(SEM_NOGPFAULTERRORBOX | SEM_FAILCRITICALERRORS);
+
+#if !GTEST_OS_WINDOWS_MOBILE
+ // The default unhandled exception filter does not always exit
+ // with the exception code as exit code - for example it exits with
+ // 0 for EXCEPTION_ACCESS_VIOLATION and 1 for EXCEPTION_BREAKPOINT
+ // if the application is compiled in debug mode. Thus we use our own
+ // filter which always exits with the exception code for unhandled
+ // exceptions.
+ SetUnhandledExceptionFilter(ExitWithExceptionCode);
+#endif
#endif
+
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
diff --git a/gtest/test/gtest_color_test_.cc b/gtest/test/gtest_color_test_.cc
index 305aeb9..58d377c 100644
--- a/gtest/test/gtest_color_test_.cc
+++ b/gtest/test/gtest_color_test_.cc
@@ -37,11 +37,14 @@
#include <gtest/gtest.h>
-namespace testing {
-namespace internal {
-bool ShouldUseColor(bool stdout_is_tty);
-} // namespace internal
-} // namespace testing
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#include "src/gtest-internal-inl.h"
+#undef GTEST_IMPLEMENTATION_
using testing::internal::ShouldUseColor;
diff --git a/gtest/test/gtest_env_var_test.py b/gtest/test/gtest_env_var_test.py
index f8250d4..bcc0bfd 100755
--- a/gtest/test/gtest_env_var_test.py
+++ b/gtest/test/gtest_env_var_test.py
@@ -42,6 +42,8 @@ IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
+environ = os.environ.copy()
+
def AssertEq(expected, actual):
if expected != actual:
@@ -54,9 +56,9 @@ def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
- os.environ[env_var] = value
- elif env_var in os.environ:
- del os.environ[env_var]
+ environ[env_var] = value
+ elif env_var in environ:
+ del environ[env_var]
def GetFlag(flag):
@@ -65,7 +67,7 @@ def GetFlag(flag):
args = [COMMAND]
if flag is not None:
args += [flag]
- return gtest_test_utils.Subprocess(args).output
+ return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
diff --git a/gtest/test/gtest_filter_unittest.py b/gtest/test/gtest_filter_unittest.py
index a94a521..0d1a770 100755
--- a/gtest/test/gtest_filter_unittest.py
+++ b/gtest/test/gtest_filter_unittest.py
@@ -45,11 +45,42 @@ __author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
+import sys
+
import gtest_test_utils
# Constants.
-IS_WINDOWS = os.name == 'nt'
+# Checks if this platform can pass empty environment variables to child
+# processes. We set an env variable to an empty string and invoke a python
+# script in a subprocess to print whether the variable is STILL in
+# os.environ. We then use 'eval' to parse the child's output so that an
+# exception is thrown if the input is anything other than 'True' or 'False'.
+os.environ['EMPTY_VAR'] = ''
+child = gtest_test_utils.Subprocess(
+ [sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
+CAN_PASS_EMPTY_ENV = eval(child.output)
+
+
+# Check if this platform can unset environment variables in child processes.
+# We set an env variable to a non-empty string, unset it, and invoke
+# a python script in a subprocess to print whether the variable
+# is NO LONGER in os.environ.
+# We use 'eval' to parse the child's output so that an exception
+# is thrown if the input is neither 'True' nor 'False'.
+os.environ['UNSET_VAR'] = 'X'
+del os.environ['UNSET_VAR']
+child = gtest_test_utils.Subprocess(
+ [sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
+CAN_UNSET_ENV = eval(child.output)
+
+
+# Checks if we should test with an empty filter. This doesn't
+# make sense on platforms that cannot pass empty env variables (Win32)
+# and on platforms that cannot unset variables (since we cannot tell
+# the difference between "" and NULL -- Borland and Solaris < 5.10)
+CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
+
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
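
The two probes added above decide, at import time, whether the platform can pass empty environment variables to a child process and whether it can unset variables, by having a child Python interpreter print a boolean and then eval()-ing the child's output. As a side note (not part of this patch), the same check can be written without eval by comparing the child's stripped output, roughly like this:

import os
import subprocess
import sys

os.environ['EMPTY_VAR'] = ''
child = subprocess.Popen(
    [sys.executable, '-c',
     'import os, sys; sys.stdout.write(str("EMPTY_VAR" in os.environ))'],
    stdout=subprocess.PIPE, universal_newlines=True)
out = child.communicate()[0].strip()
assert out in ('True', 'False'), out  # fail loudly on unexpected output
CAN_PASS_EMPTY_ENV = (out == 'True')
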
@@ -77,6 +108,14 @@ TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
+# The command line flag to tell Google Test to output the list of tests it
+# will run.
+LIST_TESTS_FLAG = '--gtest_list_tests'
+
+# Indicates whether Google Test supports death tests.
+SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
+ [COMMAND, LIST_TESTS_FLAG]).output
+
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
@@ -98,6 +137,14 @@ DISABLED_TESTS = [
'DISABLED_FoobarbazTest.TestA',
]
+if SUPPORTS_DEATH_TESTS:
+ DEATH_TESTS = [
+ 'HasDeathTest.Test1',
+ 'HasDeathTest.Test2',
+ ]
+else:
+ DEATH_TESTS = []
+
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
@@ -110,35 +157,35 @@ ACTIVE_TESTS = [
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
-
- 'HasDeathTest.Test1',
- 'HasDeathTest.Test2',
- ] + PARAM_TESTS
+ ] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
+environ = os.environ.copy()
+
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
- os.environ[env_var] = value
- elif env_var in os.environ:
- del os.environ[env_var]
+ environ[env_var] = value
+ elif env_var in environ:
+ del environ[env_var]
def RunAndReturnOutput(args = None):
"""Runs the test program and returns its output."""
- return gtest_test_utils.Subprocess([COMMAND] + (args or [])).output
+ return gtest_test_utils.Subprocess([COMMAND] + (args or []),
+ env=environ).output
def RunAndExtractTestList(args = None):
"""Runs the test program and returns its exit code and a list of tests run."""
- p = gtest_test_utils.Subprocess([COMMAND] + (args or []))
+ p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
tests_run = []
test_case = ''
test = ''
@@ -157,15 +204,12 @@ def RunAndExtractTestList(args = None):
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
- original_env = os.environ.copy()
- os.environ.update(extra_env)
+ original_env = environ.copy()
+ environ.update(extra_env)
return function(*args, **kwargs)
finally:
- for key in extra_env.iterkeys():
- if key in original_env:
- os.environ[key] = original_env[key]
- else:
- del os.environ[key]
+ environ.clear()
+ environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
@@ -179,7 +223,7 @@ def RunWithSharding(total_shards, shard_index, command):
class GTestFilterUnitTest(gtest_test_utils.TestCase):
- """Tests GTEST_FILTER env variable or --gtest_filter flag to filter tests."""
+ """Tests the env variable or the command line flag to filter tests."""
# Utilities.
@@ -211,26 +255,26 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
- """Checks that the binary runs correct set of tests for the given filter."""
+ """Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
- # First, tests using GTEST_FILTER.
+ # First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
- # to a new process. This means it is impossible to pass an empty filter
- # into a process using the GTEST_FILTER environment variable. However,
- # we can still test the case when the variable is not supplied (i.e.,
- # gtest_filter is None).
+ # to a new process. This means it is impossible to pass an empty filter
+ # into a process using the environment variable. However, we can still
+ # test the case when the variable is not supplied (i.e., gtest_filter is
+ # None).
# pylint: disable-msg=C6403
- if not IS_WINDOWS or gtest_filter != '':
+ if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
- # Next, tests using --gtest_filter.
+ # Next, tests using the command line flag.
if gtest_filter is None:
args = []
@@ -260,12 +304,12 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
- # to a new process. This means it is impossible to pass an empty filter
- # into a process using the GTEST_FILTER environment variable. However,
- # we can still test the case when the variable is not supplied (i.e.,
- # gtest_filter is None).
+ # to a new process. This means it is impossible to pass an empty filter
+ # into a process using the environment variable. However, we can still
+ # test the case when the variable is not supplied (i.e., gtest_filter is
+ # None).
# pylint: disable-msg=C6403
- if not IS_WINDOWS or gtest_filter != '':
+ if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
@@ -404,10 +448,7 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
'BazTest.TestOne',
'BazTest.TestA',
- 'BazTest.TestB',
-
- 'HasDeathTest.Test1',
- 'HasDeathTest.Test2', ] + PARAM_TESTS)
+ 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
@@ -468,7 +509,7 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
])
def testNegativeFilters(self):
- self.RunAndVerify('*-HasDeathTest.Test1', [
+ self.RunAndVerify('*-BazTest.TestOne', [
'FooTest.Abc',
'FooTest.Xyz',
@@ -476,24 +517,17 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
'BarTest.TestTwo',
'BarTest.TestThree',
- 'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
+ ] + DEATH_TESTS + PARAM_TESTS)
- 'HasDeathTest.Test2',
- ] + PARAM_TESTS)
-
- self.RunAndVerify('*-FooTest.Abc:HasDeathTest.*', [
+ self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
-
- 'BazTest.TestOne',
- 'BazTest.TestA',
- 'BazTest.TestB',
- ] + PARAM_TESTS)
+ ] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
@@ -501,15 +535,11 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
])
# Tests without leading '*'.
- self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:HasDeathTest.*', [
+ self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
-
- 'BazTest.TestOne',
- 'BazTest.TestA',
- 'BazTest.TestB',
- ] + PARAM_TESTS)
+ ] + DEATH_TESTS + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
@@ -555,7 +585,7 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
- """Tests that the shard file is created with --gtest_list_tests."""
+ """Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
@@ -563,32 +593,41 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
- InvokeWithModifiedEnv(extra_env,
- RunAndReturnOutput,
- ['--gtest_list_tests'])
+ output = InvokeWithModifiedEnv(extra_env,
+ RunAndReturnOutput,
+ [LIST_TESTS_FLAG])
finally:
+ # This assertion ensures that Google Test enumerated the tests as
+ # opposed to running them.
+ self.assert_('[==========]' not in output,
+ 'Unexpected output during test enumeration.\n'
+ 'Please ensure that LIST_TESTS_FLAG is assigned the\n'
+ 'correct flag value for listing Google Test tests.')
+
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
- def testShardingWorksWithDeathTests(self):
- """Tests integration with death tests and sharding."""
- gtest_filter = 'HasDeathTest.*:SeqP/*'
- expected_tests = [
- 'HasDeathTest.Test1',
- 'HasDeathTest.Test2',
-
- 'SeqP/ParamTest.TestX/0',
- 'SeqP/ParamTest.TestX/1',
- 'SeqP/ParamTest.TestY/0',
- 'SeqP/ParamTest.TestY/1',
- ]
-
- for flag in ['--gtest_death_test_style=threadsafe',
- '--gtest_death_test_style=fast']:
- self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
- check_exit_0=True, args=[flag])
- self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
- check_exit_0=True, args=[flag])
+ if SUPPORTS_DEATH_TESTS:
+ def testShardingWorksWithDeathTests(self):
+ """Tests integration with death tests and sharding."""
+
+ gtest_filter = 'HasDeathTest.*:SeqP/*'
+ expected_tests = [
+ 'HasDeathTest.Test1',
+ 'HasDeathTest.Test2',
+
+ 'SeqP/ParamTest.TestX/0',
+ 'SeqP/ParamTest.TestX/1',
+ 'SeqP/ParamTest.TestY/0',
+ 'SeqP/ParamTest.TestY/1',
+ ]
+
+ for flag in ['--gtest_death_test_style=threadsafe',
+ '--gtest_death_test_style=fast']:
+ self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
+ check_exit_0=True, args=[flag])
+ self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
+ check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
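
A note on the SUPPORTS_DEATH_TESTS probe introduced above (and repeated in gtest_help_test.py below): instead of hard-coding platform checks, the scripts now ask the binary itself which tests it contains. A rough sketch of that probe, with a hypothetical binary path; the substring check relies on --gtest_list_tests printing each test case name followed by its indented test names:

import subprocess

BINARY = './gtest_filter_unittest_'   # hypothetical path to the test binary
LIST_TESTS_FLAG = '--gtest_list_tests'

p = subprocess.Popen([BINARY, LIST_TESTS_FLAG],
                     stdout=subprocess.PIPE, universal_newlines=True)
listing = p.communicate()[0]
# When death tests are compiled in, the listing contains a block like:
#   HasDeathTest.
#     Test1
#     Test2
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in listing
if SUPPORTS_DEATH_TESTS:
  DEATH_TESTS = ['HasDeathTest.Test1', 'HasDeathTest.Test2']
else:
  DEATH_TESTS = []
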
diff --git a/gtest/test/gtest_help_test.py b/gtest/test/gtest_help_test.py
index 91081ad..3cb4c48 100755
--- a/gtest/test/gtest_help_test.py
+++ b/gtest/test/gtest_help_test.py
@@ -50,6 +50,15 @@ PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
+UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
+LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
+INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
+ re.sub('^--', '/', LIST_TESTS_FLAG),
+ re.sub('_', '-', LIST_TESTS_FLAG)]
+INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
+
+SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
+ [PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
@@ -88,18 +97,41 @@ class GTestHelpTest(gtest_test_utils.TestCase):
"""Tests the --help flag and its equivalent forms."""
def TestHelpFlag(self, flag):
- """Verifies that the right message is printed and the tests are
- skipped when the given flag is specified."""
+ """Verifies correct behavior when help flag is specified.
+
+    The right message must be printed and the tests must be
+    skipped when the given flag is specified.
+
+ Args:
+ flag: A flag to pass to the binary or None.
+ """
exit_code, output = RunWithFlag(flag)
self.assertEquals(0, exit_code)
self.assert_(HELP_REGEX.search(output), output)
if IS_WINDOWS:
self.assert_(CATCH_EXCEPTIONS_FLAG in output, output)
- self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
else:
self.assert_(CATCH_EXCEPTIONS_FLAG not in output, output)
+
+ if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
+ else:
+ self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
+
+ def TestNonHelpFlag(self, flag):
+ """Verifies correct behavior when no help flag is specified.
+
+ Verifies that when no help flag is specified, the tests are run
+ and the help message is not printed.
+
+ Args:
+ flag: A flag to pass to the binary or None.
+ """
+
+ exit_code, output = RunWithFlag(flag)
+ self.assert_(exit_code != 0)
+ self.assert_(not HELP_REGEX.search(output), output)
def testPrintsHelpWithFullFlag(self):
self.TestHelpFlag('--help')
@@ -113,13 +145,24 @@ class GTestHelpTest(gtest_test_utils.TestCase):
def testPrintsHelpWithWindowsStyleQuestionFlag(self):
self.TestHelpFlag('/?')
+ def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
+ self.TestHelpFlag(UNKNOWN_FLAG)
+
+ def testPrintsHelpWithIncorrectFlagStyle(self):
+ for incorrect_flag in INCORRECT_FLAG_VARIANTS:
+ self.TestHelpFlag(incorrect_flag)
+
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed."""
- exit_code, output = RunWithFlag(None)
- self.assert_(exit_code != 0)
- self.assert_(not HELP_REGEX.search(output), output)
+ self.TestNonHelpFlag(None)
+
+ def testRunsTestsWithGtestInternalFlag(self):
+ """Verifies that the tests are run and no help message is printed when
+    a flag starting with the Google Test prefix and 'internal_' is supplied."""
+
+ self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
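
For reference, the three re.sub() calls that build INCORRECT_FLAG_VARIANTS above produce a single-dash form, a Windows-style slash form, and a dash-separated form of the list-tests flag; all three should trigger the help message rather than be accepted. A quick check of what they evaluate to:

import re

LIST_TESTS_FLAG = '--gtest_list_tests'
print(re.sub('^--', '-', LIST_TESTS_FLAG))  # -gtest_list_tests
print(re.sub('^--', '/', LIST_TESTS_FLAG))  # /gtest_list_tests
print(re.sub('_', '-', LIST_TESTS_FLAG))    # --gtest-list-tests
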
diff --git a/gtest/test/gtest_help_test_.cc b/gtest/test/gtest_help_test_.cc
index 0282bc8..aad0d72 100644
--- a/gtest/test/gtest_help_test_.cc
+++ b/gtest/test/gtest_help_test_.cc
@@ -40,3 +40,7 @@
TEST(HelpFlagTest, ShouldNotBeRun) {
ASSERT_TRUE(false) << "Tests shouldn't be run when --help is specified.";
}
+
+#if GTEST_HAS_DEATH_TEST
+TEST(DeathTest, UsedByPythonScriptToDetectSupportForDeathTestsInThisBinary) {}
+#endif
diff --git a/gtest/test/gtest_nc.cc b/gtest/test/gtest_nc.cc
deleted file mode 100644
index 73b5db6..0000000
--- a/gtest/test/gtest_nc.cc
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2007, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan@google.com (Zhanyong Wan)
-
-// This file is the input to a negative-compilation test for Google
-// Test. Code here is NOT supposed to compile. Its purpose is to
-// verify that certain incorrect usages of the Google Test API are
-// indeed rejected by the compiler.
-//
-// We still need to write the negative-compilation test itself, which
-// will be tightly coupled with the build environment.
-//
-// TODO(wan@google.com): finish the negative-compilation test.
-
-#ifdef TEST_CANNOT_IGNORE_RUN_ALL_TESTS_RESULT
-// Tests that the result of RUN_ALL_TESTS() cannot be ignored.
-
-#include <gtest/gtest.h>
-
-int main(int argc, char** argv) {
- testing::InitGoogleTest(&argc, argv);
- RUN_ALL_TESTS(); // This line shouldn't compile.
-}
-
-#elif defined(TEST_USER_CANNOT_INCLUDE_GTEST_INTERNAL_INL_H)
-// Tests that a user cannot include gtest-internal-inl.h in his code.
-
-#include "src/gtest-internal-inl.h"
-
-#elif defined(TEST_CATCHES_DECLARING_SETUP_IN_TEST_FIXTURE_WITH_TYPO)
-// Tests that the compiler catches the typo when a user declares a
-// Setup() method in a test fixture.
-
-#include <gtest/gtest.h>
-
-class MyTest : public testing::Test {
- protected:
- void Setup() {}
-};
-
-#elif defined(TEST_CATCHES_CALLING_SETUP_IN_TEST_WITH_TYPO)
-// Tests that the compiler catches the typo when a user calls Setup()
-// from a test fixture.
-
-#include <gtest/gtest.h>
-
-class MyTest : public testing::Test {
- protected:
- virtual void SetUp() {
- testing::Test::Setup(); // Tries to call SetUp() in the parent class.
- }
-};
-
-#elif defined(TEST_CATCHES_DECLARING_SETUP_IN_ENVIRONMENT_WITH_TYPO)
-// Tests that the compiler catches the typo when a user declares a
-// Setup() method in a subclass of Environment.
-
-#include <gtest/gtest.h>
-
-class MyEnvironment : public testing::Environment {
- public:
- void Setup() {}
-};
-
-#elif defined(TEST_CATCHES_CALLING_SETUP_IN_ENVIRONMENT_WITH_TYPO)
-// Tests that the compiler catches the typo when a user calls Setup()
-// in an Environment.
-
-#include <gtest/gtest.h>
-
-class MyEnvironment : public testing::Environment {
- protected:
- virtual void SetUp() {
- // Tries to call SetUp() in the parent class.
- testing::Environment::Setup();
- }
-};
-
-#elif defined(TEST_CATCHES_WRONG_CASE_IN_TYPED_TEST_P)
-// Tests that the compiler catches using the wrong test case name in
-// TYPED_TEST_P.
-
-#include <gtest/gtest.h>
-
-template <typename T>
-class FooTest : public testing::Test {
-};
-
-template <typename T>
-class BarTest : public testing::Test {
-};
-
-TYPED_TEST_CASE_P(FooTest);
-TYPED_TEST_P(BarTest, A) {} // Wrong test case name.
-REGISTER_TYPED_TEST_CASE_P(FooTest, A);
-INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, testing::Types<int>);
-
-#elif defined(TEST_CATCHES_WRONG_CASE_IN_REGISTER_TYPED_TEST_CASE_P)
-// Tests that the compiler catches using the wrong test case name in
-// REGISTER_TYPED_TEST_CASE_P.
-
-#include <gtest/gtest.h>
-
-template <typename T>
-class FooTest : public testing::Test {
-};
-
-template <typename T>
-class BarTest : public testing::Test {
-};
-
-TYPED_TEST_CASE_P(FooTest);
-TYPED_TEST_P(FooTest, A) {}
-REGISTER_TYPED_TEST_CASE_P(BarTest, A); // Wrong test case name.
-INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, testing::Types<int>);
-
-#elif defined(TEST_CATCHES_WRONG_CASE_IN_INSTANTIATE_TYPED_TEST_CASE_P)
-// Tests that the compiler catches using the wrong test case name in
-// INSTANTIATE_TYPED_TEST_CASE_P.
-
-#include <gtest/gtest.h>
-
-template <typename T>
-class FooTest : public testing::Test {
-};
-
-template <typename T>
-class BarTest : public testing::Test {
-};
-
-TYPED_TEST_CASE_P(FooTest);
-TYPED_TEST_P(FooTest, A) {}
-REGISTER_TYPED_TEST_CASE_P(FooTest, A);
-
-// Wrong test case name.
-INSTANTIATE_TYPED_TEST_CASE_P(My, BarTest, testing::Types<int>);
-
-#elif defined(TEST_CATCHES_INSTANTIATE_TYPED_TESET_CASE_P_WITH_SAME_NAME_PREFIX)
-// Tests that the compiler catches instantiating TYPED_TEST_CASE_P
-// twice with the same name prefix.
-
-#include <gtest/gtest.h>
-
-template <typename T>
-class FooTest : public testing::Test {
-};
-
-TYPED_TEST_CASE_P(FooTest);
-TYPED_TEST_P(FooTest, A) {}
-REGISTER_TYPED_TEST_CASE_P(FooTest, A);
-
-INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, testing::Types<int>);
-
-// Wrong name prefix: "My" has been used.
-INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, testing::Types<double>);
-
-#elif defined(TEST_STATIC_ASSERT_TYPE_EQ_IS_NOT_A_TYPE)
-
-#include <gtest/gtest.h>
-
-// Tests that StaticAssertTypeEq<T1, T2> cannot be used as a type.
-testing::StaticAssertTypeEq<int, int> dummy;
-
-#elif defined(TEST_STATIC_ASSERT_TYPE_EQ_WORKS_IN_NAMESPACE)
-
-#include <gtest/gtest.h>
-
-// Tests that StaticAssertTypeEq<T1, T2> works in a namespace scope.
-static bool dummy = testing::StaticAssertTypeEq<int, const int>();
-
-#elif defined(TEST_STATIC_ASSERT_TYPE_EQ_WORKS_IN_CLASS)
-
-#include <gtest/gtest.h>
-
-template <typename T>
-class Helper {
- public:
- // Tests that StaticAssertTypeEq<T1, T2> works in a class.
- Helper() { testing::StaticAssertTypeEq<int, T>(); }
-
- void DoSomething() {}
-};
-
-void Test() {
- Helper<bool> h;
- h.DoSomething(); // To avoid the "unused variable" warning.
-}
-
-#elif defined(TEST_STATIC_ASSERT_TYPE_EQ_WORKS_IN_FUNCTION)
-
-#include <gtest/gtest.h>
-
-void Test() {
- // Tests that StaticAssertTypeEq<T1, T2> works inside a function.
- testing::StaticAssertTypeEq<const int, int>();
-}
-
-#else
-// A sanity test. This should compile.
-
-#include <gtest/gtest.h>
-
-int main() {
- return RUN_ALL_TESTS();
-}
-
-#endif
diff --git a/gtest/test/gtest_nc_test.py b/gtest/test/gtest_nc_test.py
deleted file mode 100755
index 06ffb3f..0000000
--- a/gtest/test/gtest_nc_test.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Negative compilation test for Google Test."""
-
-__author__ = 'wan@google.com (Zhanyong Wan)'
-
-import os
-import sys
-import unittest
-
-
-IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
-if not IS_LINUX:
- sys.exit(0) # Negative compilation tests are not supported on Windows & Mac.
-
-
-class GTestNCTest(unittest.TestCase):
- """Negative compilation test for Google Test."""
-
- def testCompilerError(self):
- """Verifies that erroneous code leads to expected compiler
- messages."""
-
- # Defines a list of test specs, where each element is a tuple
- # (test name, list of regexes for matching the compiler errors).
- test_specs = [
- ('CANNOT_IGNORE_RUN_ALL_TESTS_RESULT',
- [r'ignoring return value']),
-
- ('USER_CANNOT_INCLUDE_GTEST_INTERNAL_INL_H',
- [r'must not be included except by Google Test itself']),
-
- ('CATCHES_DECLARING_SETUP_IN_TEST_FIXTURE_WITH_TYPO',
- [r'Setup_should_be_spelled_SetUp']),
-
- ('CATCHES_CALLING_SETUP_IN_TEST_WITH_TYPO',
- [r'Setup_should_be_spelled_SetUp']),
-
- ('CATCHES_DECLARING_SETUP_IN_ENVIRONMENT_WITH_TYPO',
- [r'Setup_should_be_spelled_SetUp']),
-
- ('CATCHES_CALLING_SETUP_IN_ENVIRONMENT_WITH_TYPO',
- [r'Setup_should_be_spelled_SetUp']),
-
- ('CATCHES_WRONG_CASE_IN_TYPED_TEST_P',
- [r'BarTest.*was not declared']),
-
- ('CATCHES_WRONG_CASE_IN_REGISTER_TYPED_TEST_CASE_P',
- [r'BarTest.*was not declared']),
-
- ('CATCHES_WRONG_CASE_IN_INSTANTIATE_TYPED_TEST_CASE_P',
- [r'BarTest.*not declared']),
-
- ('CATCHES_INSTANTIATE_TYPED_TESET_CASE_P_WITH_SAME_NAME_PREFIX',
- [r'redefinition of.*My.*FooTest']),
-
- ('STATIC_ASSERT_TYPE_EQ_IS_NOT_A_TYPE',
- [r'StaticAssertTypeEq.* does not name a type']),
-
- ('STATIC_ASSERT_TYPE_EQ_WORKS_IN_NAMESPACE',
- [r'StaticAssertTypeEq.*int.*const int']),
-
- ('STATIC_ASSERT_TYPE_EQ_WORKS_IN_CLASS',
- [r'StaticAssertTypeEq.*int.*bool']),
-
- ('STATIC_ASSERT_TYPE_EQ_WORKS_IN_FUNCTION',
- [r'StaticAssertTypeEq.*const int.*int']),
-
- ('SANITY',
- None)
- ]
-
- # TODO(wan@google.com): verify that the test specs are satisfied.
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/gtest/test/gtest_output_test.py b/gtest/test/gtest_output_test.py
index c8a38f5..192030a 100755
--- a/gtest/test/gtest_output_test.py
+++ b/gtest/test/gtest_output_test.py
@@ -48,6 +48,7 @@ import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
+CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
@@ -123,18 +124,32 @@ def RemoveTime(output):
return re.sub(r'\(\d+ ms', '(? ms', output)
+def RemoveTypeInfoDetails(test_output):
+ """Removes compiler-specific type info from Google Test program's output.
+
+ Args:
+ test_output: the output of a Google Test program.
+
+ Returns:
+ output with type information normalized to canonical form.
+ """
+
+ # some compilers output the name of type 'unsigned int' as 'unsigned'
+ return re.sub(r'unsigned int', 'unsigned', test_output)
+
+
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
- output = re.sub(r'\d+ tests, listed below',
+ output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
- output = re.sub(r'\d+ tests from \d+ test cases',
+ output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
- output = re.sub(r'\d+ tests from ([a-zA-Z_])',
+ output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
- return re.sub(r'\d+ tests\.', '? tests.', output)
+ return re.sub(r'\d+ tests?\.', '? tests.', output)
def RemoveMatchingTests(test_output, pattern):
@@ -184,16 +199,9 @@ def GetShellCommandOutput(env_cmd):
# Spawns cmd in a sub-process, and gets its standard I/O file objects.
# Set and save the environment properly.
- old_env_vars = dict(os.environ)
- os.environ.update(env_cmd[0])
- p = gtest_test_utils.Subprocess(env_cmd[1])
-
- # Changes made by os.environ.clear are not inheritable by child processes
- # until Python 2.6. To produce inheritable changes we have to delete
- # environment items with the del statement.
- for key in os.environ.keys():
- del os.environ[key]
- os.environ.update(old_env_vars)
+ environ = os.environ.copy()
+ environ.update(env_cmd[0])
+ p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
return p.output
@@ -209,8 +217,10 @@ def GetCommandOutput(env_cmd):
"""
# Disables exception pop-ups on Windows.
- os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
- return NormalizeOutput(GetShellCommandOutput(env_cmd))
+ environ, cmdline = env_cmd
+ environ = dict(environ) # Ensures we are modifying a copy.
+ environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
+ return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
@@ -228,7 +238,9 @@ SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False
-CAN_GENERATE_GOLDEN_FILE = SUPPORTS_DEATH_TESTS and SUPPORTS_TYPED_TESTS
+CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
+ SUPPORTS_TYPED_TESTS and
+ SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
@@ -237,6 +249,8 @@ class GTestOutputTest(gtest_test_utils.TestCase):
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
+ test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
+ test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
@@ -262,24 +276,30 @@ class GTestOutputTest(gtest_test_utils.TestCase):
# We want the test to pass regardless of certain features being
# supported or not.
+
+ # We still have to remove type name specifics in all cases.
+ normalized_actual = RemoveTypeInfoDetails(output)
+ normalized_golden = RemoveTypeInfoDetails(golden)
+
if CAN_GENERATE_GOLDEN_FILE:
- self.assert_(golden == output)
+ self.assertEqual(normalized_golden, normalized_actual)
else:
- normalized_actual = RemoveTestCounts(output)
- normalized_golden = RemoveTestCounts(self.RemoveUnsupportedTests(golden))
+ normalized_actual = RemoveTestCounts(normalized_actual)
+ normalized_golden = RemoveTestCounts(self.RemoveUnsupportedTests(
+ normalized_golden))
- # This code is very handy when debugging test differences so I left it
- # here, commented.
- # open(os.path.join(
- # gtest_test_utils.GetSourceDir(),
- # '_gtest_output_test_normalized_actual.txt'), 'wb').write(
- # normalized_actual)
- # open(os.path.join(
- # gtest_test_utils.GetSourceDir(),
- # '_gtest_output_test_normalized_golden.txt'), 'wb').write(
- # normalized_golden)
+ # This code is very handy when debugging golden file differences:
+ if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
+ open(os.path.join(
+ gtest_test_utils.GetSourceDir(),
+ '_gtest_output_test_normalized_actual.txt'), 'wb').write(
+ normalized_actual)
+ open(os.path.join(
+ gtest_test_utils.GetSourceDir(),
+ '_gtest_output_test_normalized_golden.txt'), 'wb').write(
+ normalized_golden)
- self.assert_(normalized_golden == normalized_actual)
+ self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
@@ -298,8 +318,8 @@ that does not support all the required features (death tests""")
"""\nand typed tests). Please check that you are using VC++ 8.0 SP1
or higher as your compiler.""")
else:
- message += """\nand typed tests). Please generate the golden file
-using a binary built with those features enabled."""
+ message += """\ntyped tests, and threads). Please generate the
+golden file using a binary built with those features enabled."""
sys.stderr.write(message)
sys.exit(1)
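
The normalization helpers changed above exist so that one golden file can match binaries built with different subsets of features: RemoveTestCounts now also matches singular "test"/"test case" counts, and RemoveTypeInfoDetails folds compiler-specific "unsigned int" spellings into one form. A small illustration of the intended effect, using the patched RemoveTestCounts (reproduced here, slightly reformatted) and a made-up summary line:

import re

def RemoveTestCounts(output):
  output = re.sub(r'\d+ tests?, listed below', '? tests, listed below', output)
  output = re.sub(r'\d+ FAILED TESTS', '? FAILED TESTS', output)
  output = re.sub(r'\d+ tests? from \d+ test cases?',
                  '? tests from ? test cases', output)
  output = re.sub(r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1', output)
  return re.sub(r'\d+ tests?\.', '? tests.', output)

sample = '[==========] 1 test from 1 test case ran.'
print(RemoveTestCounts(sample))  # [==========] ? tests from ? test cases ran.
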
diff --git a/gtest/test/gtest_output_test_.cc b/gtest/test/gtest_output_test_.cc
index 6d75602..273e8e9 100644
--- a/gtest/test/gtest_output_test_.cc
+++ b/gtest/test/gtest_output_test_.cc
@@ -46,15 +46,17 @@
#include <stdlib.h>
-#if GTEST_HAS_PTHREAD
-#include <pthread.h>
-#endif // GTEST_HAS_PTHREAD
-
+#if GTEST_IS_THREADSAFE
using testing::ScopedFakeTestPartResultReporter;
using testing::TestPartResultArray;
+using testing::internal::Notification;
+using testing::internal::ThreadWithParam;
+#endif
+
namespace posix = ::testing::internal::posix;
using testing::internal::String;
+using testing::internal::scoped_ptr;
// Tests catching fatal failures.
@@ -214,6 +216,83 @@ TEST(SCOPED_TRACETest, CanBeRepeated) {
<< "trace point A, B, and D.";
}
+#if GTEST_IS_THREADSAFE
+// Tests that SCOPED_TRACE()s can be used concurrently from multiple
+// threads. Namely, an assertion should be affected by
+// SCOPED_TRACE()s in its own thread only.
+
+// Here's the sequence of actions that happen in the test:
+//
+// Thread A (main) | Thread B (spawned)
+// ===============================|================================
+// spawns thread B |
+// -------------------------------+--------------------------------
+// waits for n1 | SCOPED_TRACE("Trace B");
+// | generates failure #1
+// | notifies n1
+// -------------------------------+--------------------------------
+// SCOPED_TRACE("Trace A"); | waits for n2
+// generates failure #2 |
+// notifies n2 |
+// -------------------------------|--------------------------------
+// waits for n3 | generates failure #3
+// | trace B dies
+// | generates failure #4
+// | notifies n3
+// -------------------------------|--------------------------------
+// generates failure #5 | finishes
+// trace A dies |
+// generates failure #6 |
+// -------------------------------|--------------------------------
+// waits for thread B to finish |
+
+struct CheckPoints {
+ Notification n1;
+ Notification n2;
+ Notification n3;
+};
+
+static void ThreadWithScopedTrace(CheckPoints* check_points) {
+ {
+ SCOPED_TRACE("Trace B");
+ ADD_FAILURE()
+ << "Expected failure #1 (in thread B, only trace B alive).";
+ check_points->n1.Notify();
+ check_points->n2.WaitForNotification();
+
+ ADD_FAILURE()
+ << "Expected failure #3 (in thread B, trace A & B both alive).";
+ } // Trace B dies here.
+ ADD_FAILURE()
+ << "Expected failure #4 (in thread B, only trace A alive).";
+ check_points->n3.Notify();
+}
+
+TEST(SCOPED_TRACETest, WorksConcurrently) {
+ printf("(expecting 6 failures)\n");
+
+ CheckPoints check_points;
+ ThreadWithParam<CheckPoints*> thread(&ThreadWithScopedTrace,
+ &check_points,
+ NULL);
+ check_points.n1.WaitForNotification();
+
+ {
+ SCOPED_TRACE("Trace A");
+ ADD_FAILURE()
+ << "Expected failure #2 (in thread A, trace A & B both alive).";
+ check_points.n2.Notify();
+ check_points.n3.WaitForNotification();
+
+ ADD_FAILURE()
+ << "Expected failure #5 (in thread A, only trace A alive).";
+ } // Trace A dies here.
+ ADD_FAILURE()
+ << "Expected failure #6 (in thread A, no trace alive).";
+ thread.Join();
+}
+#endif // GTEST_IS_THREADSAFE
+
TEST(DisabledTestsWarningTest,
DISABLED_AlsoRunDisabledTestsFlagSuppressesWarning) {
// This test body is intentionally empty. Its sole purpose is for
@@ -479,6 +558,63 @@ TEST_F(ExceptionInTearDownTest, ExceptionInTearDown) {
#endif // GTEST_OS_WINDOWS
+#if GTEST_IS_THREADSAFE
+
+// A unary function that may die.
+void DieIf(bool should_die) {
+ GTEST_CHECK_(!should_die) << " - death inside DieIf().";
+}
+
+// Tests running death tests in a multi-threaded context.
+
+// Used for coordination between the main and the spawn thread.
+struct SpawnThreadNotifications {
+ SpawnThreadNotifications() {}
+
+ Notification spawn_thread_started;
+ Notification spawn_thread_ok_to_terminate;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(SpawnThreadNotifications);
+};
+
+// The function to be executed in the thread spawn by the
+// MultipleThreads test (below).
+static void ThreadRoutine(SpawnThreadNotifications* notifications) {
+ // Signals the main thread that this thread has started.
+ notifications->spawn_thread_started.Notify();
+
+ // Waits for permission to finish from the main thread.
+ notifications->spawn_thread_ok_to_terminate.WaitForNotification();
+}
+
+// This is a death-test test, but it's not named with a DeathTest
+// suffix. It starts threads which might interfere with later
+// death tests, so it must run after all other death tests.
+class DeathTestAndMultiThreadsTest : public testing::Test {
+ protected:
+ // Starts a thread and waits for it to begin.
+ virtual void SetUp() {
+ thread_.reset(new ThreadWithParam<SpawnThreadNotifications*>(
+ &ThreadRoutine, &notifications_, NULL));
+ notifications_.spawn_thread_started.WaitForNotification();
+ }
+ // Tells the thread to finish, and reaps it.
+ // Depending on the version of the thread library in use,
+ // a manager thread might still be left running that will interfere
+ // with later death tests. This is unfortunate, but this class
+ // cleans up after itself as best it can.
+ virtual void TearDown() {
+ notifications_.spawn_thread_ok_to_terminate.Notify();
+ }
+
+ private:
+ SpawnThreadNotifications notifications_;
+ scoped_ptr<ThreadWithParam<SpawnThreadNotifications*> > thread_;
+};
+
+#endif // GTEST_IS_THREADSAFE
+
// The MixedUpTestCaseTest test case verifies that Google Test will fail a
// test if it uses a different fixture class than what other tests in
// the same test case use. It deliberately contains two fixture
@@ -849,23 +985,13 @@ TEST_F(ExpectFailureTest, ExpectNonFatalFailure) {
"failure.");
}
-#if GTEST_IS_THREADSAFE && GTEST_HAS_PTHREAD
+#if GTEST_IS_THREADSAFE
class ExpectFailureWithThreadsTest : public ExpectFailureTest {
protected:
static void AddFailureInOtherThread(FailureMode failure) {
- pthread_t tid;
- pthread_create(&tid,
- NULL,
- ExpectFailureWithThreadsTest::FailureThread,
- &failure);
- pthread_join(tid, NULL);
- }
- private:
- static void* FailureThread(void* attr) {
- FailureMode* failure = static_cast<FailureMode*>(attr);
- AddFailure(*failure);
- return NULL;
+ ThreadWithParam<FailureMode> thread(&AddFailure, failure, NULL);
+ thread.Join();
}
};
@@ -901,7 +1027,7 @@ TEST_F(ScopedFakeTestPartResultReporterTest, InterceptOnlyCurrentThread) {
EXPECT_EQ(0, results.size()) << "This shouldn't fail.";
}
-#endif // GTEST_IS_THREADSAFE && GTEST_HAS_PTHREAD
+#endif // GTEST_IS_THREADSAFE
TEST_F(ExpectFailureTest, ExpectFatalFailureOnAllThreads) {
// Expected fatal failure, but succeeds.
diff --git a/gtest/test/gtest_output_test_golden_lin.txt b/gtest/test/gtest_output_test_golden_lin.txt
index 51bae52..ec60437 100644
--- a/gtest/test/gtest_output_test_golden_lin.txt
+++ b/gtest/test/gtest_output_test_golden_lin.txt
@@ -7,7 +7,7 @@ Expected: true
gtest_output_test_.cc:#: Failure
Value of: 3
Expected: 2
-[==========] Running 56 tests from 23 test cases.
+[==========] Running 60 tests from 25 test cases.
[----------] Global test environment set-up.
FooEnvironment::SetUp() called.
BarEnvironment::SetUp() called.
@@ -65,7 +65,7 @@ i == 3
gtest_output_test_.cc:#: Failure
Expected: (3) >= (a[i]), actual: 3 vs 6
[ FAILED ] LoggingTest.InterleavingLoggingAndAssertions
-[----------] 5 tests from SCOPED_TRACETest
+[----------] 6 tests from SCOPED_TRACETest
[ RUN ] SCOPED_TRACETest.ObeysScopes
(expected to fail)
gtest_output_test_.cc:#: Failure
@@ -148,6 +148,35 @@ gtest_output_test_.cc:#: D
gtest_output_test_.cc:#: B
gtest_output_test_.cc:#: A
[ FAILED ] SCOPED_TRACETest.CanBeRepeated
+[ RUN ] SCOPED_TRACETest.WorksConcurrently
+(expecting 6 failures)
+gtest_output_test_.cc:#: Failure
+Failed
+Expected failure #1 (in thread B, only trace B alive).
+Google Test trace:
+gtest_output_test_.cc:#: Trace B
+gtest_output_test_.cc:#: Failure
+Failed
+Expected failure #2 (in thread A, trace A & B both alive).
+Google Test trace:
+gtest_output_test_.cc:#: Trace A
+gtest_output_test_.cc:#: Failure
+Failed
+Expected failure #3 (in thread B, trace A & B both alive).
+Google Test trace:
+gtest_output_test_.cc:#: Trace B
+gtest_output_test_.cc:#: Failure
+Failed
+Expected failure #4 (in thread B, only trace A alive).
+gtest_output_test_.cc:#: Failure
+Failed
+Expected failure #5 (in thread A, only trace A alive).
+Google Test trace:
+gtest_output_test_.cc:#: Trace A
+gtest_output_test_.cc:#: Failure
+Failed
+Expected failure #6 (in thread A, no trace alive).
+[ FAILED ] SCOPED_TRACETest.WorksConcurrently
[----------] 1 test from NonFatalFailureInFixtureConstructorTest
[ RUN ] NonFatalFailureInFixtureConstructorTest.FailureInConstructor
(expecting 5 failures)
@@ -506,6 +535,35 @@ Failed
Expected non-fatal failure.
[ FAILED ] ExpectFailureTest.ExpectNonFatalFailureOnAllThreads
+[----------] 2 tests from ExpectFailureWithThreadsTest
+[ RUN ] ExpectFailureWithThreadsTest.ExpectFatalFailure
+(expecting 2 failures)
+gtest_output_test_.cc:#: Failure
+Failed
+Expected fatal failure.
+gtest.cc:#: Failure
+Expected: 1 fatal failure
+ Actual: 0 failures
+[ FAILED ] ExpectFailureWithThreadsTest.ExpectFatalFailure
+[ RUN ] ExpectFailureWithThreadsTest.ExpectNonFatalFailure
+(expecting 2 failures)
+gtest_output_test_.cc:#: Failure
+Failed
+Expected non-fatal failure.
+gtest.cc:#: Failure
+Expected: 1 non-fatal failure
+ Actual: 0 failures
+[ FAILED ] ExpectFailureWithThreadsTest.ExpectNonFatalFailure
+[----------] 1 test from ScopedFakeTestPartResultReporterTest
+[ RUN ] ScopedFakeTestPartResultReporterTest.InterceptOnlyCurrentThread
+(expecting 2 failures)
+gtest_output_test_.cc:#: Failure
+Failed
+Expected fatal failure.
+gtest_output_test_.cc:#: Failure
+Failed
+Expected non-fatal failure.
+[ FAILED ] ScopedFakeTestPartResultReporterTest.InterceptOnlyCurrentThread
[----------] Global test environment tear-down
BarEnvironment::TearDown() called.
gtest_output_test_.cc:#: Failure
@@ -515,9 +573,9 @@ FooEnvironment::TearDown() called.
gtest_output_test_.cc:#: Failure
Failed
Expected fatal failure.
-[==========] 56 tests from 23 test cases ran.
+[==========] 60 tests from 25 test cases ran.
[ PASSED ] 21 tests.
-[ FAILED ] 35 tests, listed below:
+[ FAILED ] 39 tests, listed below:
[ FAILED ] FatalFailureTest.FatalFailureInSubroutine
[ FAILED ] FatalFailureTest.FatalFailureInNestedSubroutine
[ FAILED ] FatalFailureTest.NonfatalFailureInSubroutine
@@ -527,6 +585,7 @@ Expected fatal failure.
[ FAILED ] SCOPED_TRACETest.WorksInSubroutine
[ FAILED ] SCOPED_TRACETest.CanBeNested
[ FAILED ] SCOPED_TRACETest.CanBeRepeated
+[ FAILED ] SCOPED_TRACETest.WorksConcurrently
[ FAILED ] NonFatalFailureInFixtureConstructorTest.FailureInConstructor
[ FAILED ] FatalFailureInFixtureConstructorTest.FailureInConstructor
[ FAILED ] NonFatalFailureInSetUpTest.FailureInSetUp
@@ -553,8 +612,11 @@ Expected fatal failure.
[ FAILED ] ExpectFailureTest.ExpectNonFatalFailure
[ FAILED ] ExpectFailureTest.ExpectFatalFailureOnAllThreads
[ FAILED ] ExpectFailureTest.ExpectNonFatalFailureOnAllThreads
+[ FAILED ] ExpectFailureWithThreadsTest.ExpectFatalFailure
+[ FAILED ] ExpectFailureWithThreadsTest.ExpectNonFatalFailure
+[ FAILED ] ScopedFakeTestPartResultReporterTest.InterceptOnlyCurrentThread
-35 FAILED TESTS
+39 FAILED TESTS
 YOU HAVE 1 DISABLED TEST
Note: Google Test filter = FatalFailureTest.*:LoggingTest.*
diff --git a/gtest/test/gtest_shuffle_test.py b/gtest/test/gtest_shuffle_test.py
index a870a01..30d0303 100755
--- a/gtest/test/gtest_shuffle_test.py
+++ b/gtest/test/gtest_shuffle_test.py
@@ -78,16 +78,10 @@ def RandomSeedFlag(n):
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
- try:
- original_env = os.environ.copy()
- os.environ.update(extra_env)
- return gtest_test_utils.Subprocess([COMMAND] + args).output
- finally:
- for key in extra_env.iterkeys():
- if key in original_env:
- os.environ[key] = original_env[key]
- else:
- del os.environ[key]
+ environ_copy = os.environ.copy()
+ environ_copy.update(extra_env)
+
+ return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
diff --git a/gtest/test/gtest_stress_test.cc b/gtest/test/gtest_stress_test.cc
index 0034bb8..f5af78c 100644
--- a/gtest/test/gtest_stress_test.cc
+++ b/gtest/test/gtest_stress_test.cc
@@ -32,9 +32,11 @@
// Tests that SCOPED_TRACE() and various Google Test assertions can be
// used in a large number of threads concurrently.
-#include <iostream>
#include <gtest/gtest.h>
+#include <iostream>
+#include <vector>
+
// We must define this macro in order to #include
// gtest-internal-inl.h. This is how Google Test prevents a user from
// accidentally depending on its internal implementation.
@@ -42,12 +44,20 @@
#include "src/gtest-internal-inl.h"
#undef GTEST_IMPLEMENTATION_
+#if GTEST_IS_THREADSAFE
+
namespace testing {
namespace {
+using internal::Notification;
using internal::String;
using internal::TestPropertyKeyIs;
-using internal::Vector;
+using internal::ThreadWithParam;
+using internal::scoped_ptr;
+
+// In order to run tests in this file, for platforms where Google Test is
+// thread safe, implement ThreadWithParam. See the description of its API
+// in gtest-port.h, where it is defined for already supported platforms.
// How many threads to create?
const int kThreadCount = 50;
@@ -64,12 +74,13 @@ String IdToString(int id) {
return id_message.GetString();
}
-void ExpectKeyAndValueWereRecordedForId(const Vector<TestProperty>& properties,
- int id,
- const char* suffix) {
+void ExpectKeyAndValueWereRecordedForId(
+ const std::vector<TestProperty>& properties,
+ int id, const char* suffix) {
TestPropertyKeyIs matches_key(IdToKey(id, suffix).c_str());
- const TestProperty* property = properties.FindIf(matches_key);
- ASSERT_TRUE(property != NULL)
+ const std::vector<TestProperty>::const_iterator property =
+ std::find_if(properties.begin(), properties.end(), matches_key);
+ ASSERT_TRUE(property != properties.end())
<< "expecting " << suffix << " value for id " << id;
EXPECT_STREQ(IdToString(id).c_str(), property->value());
}
@@ -77,7 +88,7 @@ void ExpectKeyAndValueWereRecordedForId(const Vector<TestProperty>& properties,
// Calls a large number of Google Test assertions, where exactly one of them
// will fail.
void ManyAsserts(int id) {
- ::std::cout << "Thread #" << id << " running...\n";
+ GTEST_LOG_(INFO) << "Thread #" << id << " running...";
SCOPED_TRACE(Message() << "Thread #" << id);
@@ -104,41 +115,121 @@ void ManyAsserts(int id) {
}
}
+void CheckTestFailureCount(int expected_failures) {
+ const TestInfo* const info = UnitTest::GetInstance()->current_test_info();
+ const TestResult* const result = info->result();
+ GTEST_CHECK_(expected_failures == result->total_part_count())
+ << "Logged " << result->total_part_count() << " failures "
+ << " vs. " << expected_failures << " expected";
+}
+
// Tests using SCOPED_TRACE() and Google Test assertions in many threads
// concurrently.
TEST(StressTest, CanUseScopedTraceAndAssertionsInManyThreads) {
- // TODO(wan): when Google Test is made thread-safe, run
- // ManyAsserts() in many threads here.
+ {
+ scoped_ptr<ThreadWithParam<int> > threads[kThreadCount];
+ Notification threads_can_start;
+ for (int i = 0; i != kThreadCount; i++)
+ threads[i].reset(new ThreadWithParam<int>(&ManyAsserts,
+ i,
+ &threads_can_start));
+
+ threads_can_start.Notify();
+
+ // Blocks until all the threads are done.
+ for (int i = 0; i != kThreadCount; i++)
+ threads[i]->Join();
+ }
+
+ // Ensures that kThreadCount*kThreadCount failures have been reported.
+ const TestInfo* const info = UnitTest::GetInstance()->current_test_info();
+ const TestResult* const result = info->result();
+
+ std::vector<TestProperty> properties;
+ // We have no access to the TestResult's list of properties but we can
+ // copy them one by one.
+ for (int i = 0; i < result->test_property_count(); ++i)
+ properties.push_back(result->GetTestProperty(i));
+
+ EXPECT_EQ(kThreadCount * 2 + 1, result->test_property_count())
+ << "String and int values recorded on each thread, "
+ << "as well as one shared_key";
+ for (int i = 0; i < kThreadCount; ++i) {
+ ExpectKeyAndValueWereRecordedForId(properties, i, "string");
+ ExpectKeyAndValueWereRecordedForId(properties, i, "int");
+ }
+ CheckTestFailureCount(kThreadCount*kThreadCount);
+}
+
+void FailingThread(bool is_fatal) {
+ if (is_fatal)
+ FAIL() << "Fatal failure in some other thread. "
+ << "(This failure is expected.)";
+ else
+ ADD_FAILURE() << "Non-fatal failure in some other thread. "
+ << "(This failure is expected.)";
+}
+
+void GenerateFatalFailureInAnotherThread(bool is_fatal) {
+ ThreadWithParam<bool> thread(&FailingThread, is_fatal, NULL);
+ thread.Join();
}
TEST(NoFatalFailureTest, ExpectNoFatalFailureIgnoresFailuresInOtherThreads) {
- // TODO(mheule@google.com): Test this works correctly when Google
- // Test is made thread-safe.
+ EXPECT_NO_FATAL_FAILURE(GenerateFatalFailureInAnotherThread(true));
+ // We should only have one failure (the one from
+ // GenerateFatalFailureInAnotherThread()), since the EXPECT_NO_FATAL_FAILURE
+ // should succeed.
+ CheckTestFailureCount(1);
}
+void AssertNoFatalFailureIgnoresFailuresInOtherThreads() {
+ ASSERT_NO_FATAL_FAILURE(GenerateFatalFailureInAnotherThread(true));
+}
TEST(NoFatalFailureTest, AssertNoFatalFailureIgnoresFailuresInOtherThreads) {
- // TODO(mheule@google.com): Test this works correctly when Google
- // Test is made thread-safe.
+  // Using a subroutine to make sure that the test continues.
+ AssertNoFatalFailureIgnoresFailuresInOtherThreads();
+ // We should only have one failure (the one from
+ // GenerateFatalFailureInAnotherThread()), since the EXPECT_NO_FATAL_FAILURE
+ // should succeed.
+ CheckTestFailureCount(1);
}
TEST(FatalFailureTest, ExpectFatalFailureIgnoresFailuresInOtherThreads) {
- // TODO(mheule@google.com): Test this works correctly when Google
- // Test is made thread-safe.
+ // This statement should fail, since the current thread doesn't generate a
+ // fatal failure, only another one does.
+ EXPECT_FATAL_FAILURE(GenerateFatalFailureInAnotherThread(true), "expected");
+ CheckTestFailureCount(2);
}
TEST(FatalFailureOnAllThreadsTest, ExpectFatalFailureOnAllThreads) {
- // TODO(wan@google.com): Test this works correctly when Google Test
- // is made thread-safe.
+ // This statement should succeed, because failures in all threads are
+ // considered.
+ EXPECT_FATAL_FAILURE_ON_ALL_THREADS(
+ GenerateFatalFailureInAnotherThread(true), "expected");
+ CheckTestFailureCount(0);
+ // We need to add a failure, because main() checks that there are failures.
+ // But when only this test is run, we shouldn't have any failures.
+ ADD_FAILURE() << "This is an expected non-fatal failure.";
}
TEST(NonFatalFailureTest, ExpectNonFatalFailureIgnoresFailuresInOtherThreads) {
- // TODO(mheule@google.com): Test this works correctly when Google
- // Test is made thread-safe.
+ // This statement should fail, since the current thread doesn't generate a
+ // fatal failure, only another one does.
+ EXPECT_NONFATAL_FAILURE(GenerateFatalFailureInAnotherThread(false),
+ "expected");
+ CheckTestFailureCount(2);
}
TEST(NonFatalFailureOnAllThreadsTest, ExpectNonFatalFailureOnAllThreads) {
- // TODO(wan@google.com): Test this works correctly when Google Test
- // is made thread-safe.
+ // This statement should succeed, because failures in all threads are
+ // considered.
+ EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(
+ GenerateFatalFailureInAnotherThread(false), "expected");
+ CheckTestFailureCount(0);
+  // We need to add a failure, because main() checks that there are failures.
+ // But when only this test is run, we shouldn't have any failures.
+ ADD_FAILURE() << "This is an expected non-fatal failure.";
}
} // namespace
@@ -147,5 +238,20 @@ TEST(NonFatalFailureOnAllThreadsTest, ExpectNonFatalFailureOnAllThreads) {
int main(int argc, char **argv) {
testing::InitGoogleTest(&argc, argv);
+ const int result = RUN_ALL_TESTS(); // Expected to fail.
+ GTEST_CHECK_(result == 1) << "RUN_ALL_TESTS() did not fail as expected";
+
+ printf("\nPASS\n");
+ return 0;
+}
+
+#else
+TEST(StressTest,
+ DISABLED_ThreadSafetyTestsAreSkippedWhenGoogleTestIsNotThreadSafe) {
+}
+
+int main(int argc, char **argv) {
+ testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
+#endif // GTEST_IS_THREADSAFE
diff --git a/gtest/test/gtest_test_utils.py b/gtest/test/gtest_test_utils.py
index 385662a..e0f5973 100755
--- a/gtest/test/gtest_test_utils.py
+++ b/gtest/test/gtest_test_utils.py
@@ -51,6 +51,7 @@ except:
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
+GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
@@ -137,7 +138,7 @@ def GetTempDir():
return _temp_dir
-def GetTestExecutablePath(executable_name):
+def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
@@ -145,12 +146,15 @@ def GetTestExecutablePath(executable_name):
Args:
executable_name: name of the test binary that the test script runs.
+ build_dir: directory where to look for executables, by default
+ the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
- path = os.path.abspath(os.path.join(GetBuildDir(), executable_name))
+ path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
+ executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
@@ -190,23 +194,28 @@ def GetExitStatus(exit_code):
class Subprocess:
- def __init__(self, command, working_dir=None, capture_stderr=True):
+ def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
- Restores the old directory afterwards. Execution results are returned
- via the following attributes:
- terminated_by_sygnal True iff the child process has been terminated
- by a signal.
- signal Sygnal that terminated the child process.
- exited True iff the child process exited normally.
- exit_code The code with which the child proces exited.
- output Child process's stdout and stderr output
- combined in a string.
+
+ Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
+ env: Dictionary with environment to pass to the subprocess.
+
+ Returns:
+ An object that represents outcome of the executed process. It has the
+ following attributes:
+ terminated_by_signal True iff the child process has been terminated
+ by a signal.
+        signal                 Signal that terminated the child process.
+ exited True iff the child process exited normally.
+ exit_code The code with which the child process exited.
+ output Child process's stdout and stderr output
+ combined in a string.
"""
     # The subprocess module is the preferable way of running programs
@@ -224,13 +233,30 @@ class Subprocess:
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
- cwd=working_dir, universal_newlines=True)
+ cwd=working_dir, universal_newlines=True, env=env)
# communicate returns a tuple with the file obect for the child's
# output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
+
+ def _ReplaceEnvDict(dest, src):
+ # Changes made by os.environ.clear are not inheritable by child
+ # processes until Python 2.6. To produce inheritable changes we have
+ # to delete environment items with the del statement.
+ for key in dest:
+ del dest[key]
+ dest.update(src)
+
+ # When 'env' is not None, backup the environment variables and replace
+ # them with the passed 'env'. When 'env' is None, we simply use the
+ # current 'os.environ' for compatibility with the subprocess.Popen
+ # semantics used above.
+ if env is not None:
+ old_environ = os.environ.copy()
+ _ReplaceEnvDict(os.environ, env)
+
try:
if working_dir is not None:
os.chdir(working_dir)
@@ -243,6 +269,12 @@ class Subprocess:
ret_code = p.wait()
finally:
os.chdir(old_dir)
+
+ # Restore the old environment variables
+ # if they were replaced.
+ if env is not None:
+ _ReplaceEnvDict(os.environ, old_environ)
+
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
@@ -267,4 +299,11 @@ def Main():
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
+ # The tested binaries should not be writing XML output files unless the
+ # script explicitly instructs them to.
+ # TODO(vladl@google.com): Move this into Subprocess when we implement
+ # passing environment into it as a parameter.
+ if GTEST_OUTPUT_VAR_NAME in os.environ:
+ del os.environ[GTEST_OUTPUT_VAR_NAME]
+
_test_module.main()
diff --git a/gtest/test/gtest_unittest.cc b/gtest/test/gtest_unittest.cc
index 5c69b46..a14f065 100644
--- a/gtest/test/gtest_unittest.cc
+++ b/gtest/test/gtest_unittest.cc
@@ -33,6 +33,7 @@
// Google Test work.
#include <gtest/gtest.h>
+#include <vector>
// Verifies that the command line flag variables can be accessed
// in code once <gtest/gtest.h> has been #included.
@@ -70,21 +71,11 @@ TEST(CommandLineFlagsTest, CanBeAccessedInCodeOnceGTestHIsIncluded) {
#include <stdlib.h>
#include <time.h>
-#if GTEST_HAS_PTHREAD
-#include <pthread.h>
-#endif // GTEST_HAS_PTHREAD
-
-#ifdef __BORLANDC__
#include <map>
-#endif
namespace testing {
namespace internal {
-bool ShouldUseColor(bool stdout_is_tty);
-const char* FormatTimeInMillisAsSeconds(TimeInMillis ms);
-bool ParseInt32Flag(const char* str, const char* flag, Int32* value);
-
// Provides access to otherwise private parts of the TestEventListeners class
// that are needed to test it.
class TestEventListenersAccessor {
@@ -148,15 +139,19 @@ using testing::TestPartResultArray;
using testing::TestProperty;
using testing::TestResult;
using testing::UnitTest;
+using testing::kMaxStackTraceDepth;
using testing::internal::AlwaysFalse;
using testing::internal::AlwaysTrue;
using testing::internal::AppendUserMessage;
using testing::internal::CodePointToUtf8;
+using testing::internal::CountIf;
using testing::internal::EqFailure;
using testing::internal::FloatingPoint;
using testing::internal::FormatTimeInMillisAsSeconds;
+using testing::internal::ForEach;
using testing::internal::GTestFlagSaver;
using testing::internal::GetCurrentOsStackTraceExceptTop;
+using testing::internal::GetElementOr;
using testing::internal::GetNextRandomSeed;
using testing::internal::GetRandomSeedFromFlag;
using testing::internal::GetTestTypeId;
@@ -168,26 +163,35 @@ using testing::internal::ParseInt32Flag;
using testing::internal::ShouldRunTestOnShard;
using testing::internal::ShouldShard;
using testing::internal::ShouldUseColor;
+using testing::internal::Shuffle;
+using testing::internal::ShuffleRange;
using testing::internal::StreamableToString;
using testing::internal::String;
using testing::internal::TestEventListenersAccessor;
using testing::internal::TestResultAccessor;
-using testing::internal::ThreadLocal;
using testing::internal::UInt32;
-using testing::internal::Vector;
using testing::internal::WideStringToUtf8;
using testing::internal::kMaxRandomSeed;
using testing::internal::kTestTypeIdInGoogleTest;
using testing::internal::scoped_ptr;
-class TestingVector : public Vector<int> {
+#if GTEST_HAS_STREAM_REDIRECTION_
+using testing::internal::CaptureStdout;
+using testing::internal::GetCapturedStdout;
+#endif // GTEST_HAS_STREAM_REDIRECTION_
+
+#if GTEST_IS_THREADSAFE
+using testing::internal::ThreadWithParam;
+#endif
+
+class TestingVector : public std::vector<int> {
};
::std::ostream& operator<<(::std::ostream& os,
const TestingVector& vector) {
os << "{ ";
- for (int i = 0; i < vector.size(); i++) {
- os << vector.GetElement(i) << " ";
+ for (size_t i = 0; i < vector.size(); i++) {
+ os << vector[i] << " ";
}
os << "}";
return os;
@@ -266,27 +270,26 @@ TEST(GetTestTypeIdTest, ReturnsTheSameValueInsideOrOutsideOfGoogleTest) {
// Tests FormatTimeInMillisAsSeconds().
TEST(FormatTimeInMillisAsSecondsTest, FormatsZero) {
- EXPECT_STREQ("0", FormatTimeInMillisAsSeconds(0));
+ EXPECT_EQ("0", FormatTimeInMillisAsSeconds(0));
}
TEST(FormatTimeInMillisAsSecondsTest, FormatsPositiveNumber) {
- EXPECT_STREQ("0.003", FormatTimeInMillisAsSeconds(3));
- EXPECT_STREQ("0.01", FormatTimeInMillisAsSeconds(10));
- EXPECT_STREQ("0.2", FormatTimeInMillisAsSeconds(200));
- EXPECT_STREQ("1.2", FormatTimeInMillisAsSeconds(1200));
- EXPECT_STREQ("3", FormatTimeInMillisAsSeconds(3000));
+ EXPECT_EQ("0.003", FormatTimeInMillisAsSeconds(3));
+ EXPECT_EQ("0.01", FormatTimeInMillisAsSeconds(10));
+ EXPECT_EQ("0.2", FormatTimeInMillisAsSeconds(200));
+ EXPECT_EQ("1.2", FormatTimeInMillisAsSeconds(1200));
+ EXPECT_EQ("3", FormatTimeInMillisAsSeconds(3000));
}
TEST(FormatTimeInMillisAsSecondsTest, FormatsNegativeNumber) {
- EXPECT_STREQ("-0.003", FormatTimeInMillisAsSeconds(-3));
- EXPECT_STREQ("-0.01", FormatTimeInMillisAsSeconds(-10));
- EXPECT_STREQ("-0.2", FormatTimeInMillisAsSeconds(-200));
- EXPECT_STREQ("-1.2", FormatTimeInMillisAsSeconds(-1200));
- EXPECT_STREQ("-3", FormatTimeInMillisAsSeconds(-3000));
+ EXPECT_EQ("-0.003", FormatTimeInMillisAsSeconds(-3));
+ EXPECT_EQ("-0.01", FormatTimeInMillisAsSeconds(-10));
+ EXPECT_EQ("-0.2", FormatTimeInMillisAsSeconds(-200));
+ EXPECT_EQ("-1.2", FormatTimeInMillisAsSeconds(-1200));
+ EXPECT_EQ("-3", FormatTimeInMillisAsSeconds(-3000));
}
-#if !GTEST_OS_SYMBIAN
-// NULL testing does not work with Symbian compilers.
+#if GTEST_CAN_COMPARE_NULL
#ifdef __BORLANDC__
// Silences warnings: "Condition is always true", "Unreachable code"
@@ -320,11 +323,11 @@ TEST(NullLiteralTest, IsFalseForNonNullLiterals) {
}
#ifdef __BORLANDC__
-// Restores warnings after previous "#pragma option push" supressed them
+// Restores warnings after previous "#pragma option push" suppressed them.
#pragma option pop
#endif
-#endif // !GTEST_OS_SYMBIAN
+#endif // GTEST_CAN_COMPARE_NULL
//
// Tests CodePointToUtf8().
@@ -547,339 +550,80 @@ TEST(RandomTest, RepeatsWhenReseeded) {
}
}
-// Tests the Vector class template.
-
-// Tests Vector::Clear().
-TEST(VectorTest, Clear) {
- Vector<int> a;
- a.PushBack(1);
- a.Clear();
- EXPECT_EQ(0, a.size());
+// Tests STL container utilities.
- a.PushBack(2);
- a.PushBack(3);
- a.Clear();
- EXPECT_EQ(0, a.size());
-}
-
-// Tests Vector::PushBack().
-TEST(VectorTest, PushBack) {
- Vector<char> a;
- a.PushBack('a');
- ASSERT_EQ(1, a.size());
- EXPECT_EQ('a', a.GetElement(0));
-
- a.PushBack('b');
- ASSERT_EQ(2, a.size());
- EXPECT_EQ('a', a.GetElement(0));
- EXPECT_EQ('b', a.GetElement(1));
-}
-
-// Tests Vector::PushFront().
-TEST(VectorTest, PushFront) {
- Vector<int> a;
- ASSERT_EQ(0, a.size());
-
- // Calls PushFront() on an empty Vector.
- a.PushFront(1);
- ASSERT_EQ(1, a.size());
- EXPECT_EQ(1, a.GetElement(0));
-
- // Calls PushFront() on a singleton Vector.
- a.PushFront(2);
- ASSERT_EQ(2, a.size());
- EXPECT_EQ(2, a.GetElement(0));
- EXPECT_EQ(1, a.GetElement(1));
-
- // Calls PushFront() on a Vector with more than one elements.
- a.PushFront(3);
- ASSERT_EQ(3, a.size());
- EXPECT_EQ(3, a.GetElement(0));
- EXPECT_EQ(2, a.GetElement(1));
- EXPECT_EQ(1, a.GetElement(2));
-}
-
-// Tests Vector::PopFront().
-TEST(VectorTest, PopFront) {
- Vector<int> a;
-
- // Popping on an empty Vector should fail.
- EXPECT_FALSE(a.PopFront(NULL));
-
- // Popping again on an empty Vector should fail, and the result element
- // shouldn't be overwritten.
- int element = 1;
- EXPECT_FALSE(a.PopFront(&element));
- EXPECT_EQ(1, element);
-
- a.PushFront(2);
- a.PushFront(3);
-
- // PopFront() should pop the element in the front of the Vector.
- EXPECT_TRUE(a.PopFront(&element));
- EXPECT_EQ(3, element);
-
- // After popping the last element, the Vector should be empty.
- EXPECT_TRUE(a.PopFront(NULL));
- EXPECT_EQ(0, a.size());
-}
-
-// Tests inserting at the beginning using Vector::Insert().
-TEST(VectorTest, InsertAtBeginning) {
- Vector<int> a;
- ASSERT_EQ(0, a.size());
-
- // Inserts into an empty Vector.
- a.Insert(1, 0);
- ASSERT_EQ(1, a.size());
- EXPECT_EQ(1, a.GetElement(0));
-
- // Inserts at the beginning of a singleton Vector.
- a.Insert(2, 0);
- ASSERT_EQ(2, a.size());
- EXPECT_EQ(2, a.GetElement(0));
- EXPECT_EQ(1, a.GetElement(1));
-
- // Inserts at the beginning of a Vector with more than one elements.
- a.Insert(3, 0);
- ASSERT_EQ(3, a.size());
- EXPECT_EQ(3, a.GetElement(0));
- EXPECT_EQ(2, a.GetElement(1));
- EXPECT_EQ(1, a.GetElement(2));
-}
-
-// Tests inserting at a location other than the beginning using
-// Vector::Insert().
-TEST(VectorTest, InsertNotAtBeginning) {
- // Prepares a singleton Vector.
- Vector<int> a;
- a.PushBack(1);
-
- // Inserts at the end of a singleton Vector.
- a.Insert(2, a.size());
- ASSERT_EQ(2, a.size());
- EXPECT_EQ(1, a.GetElement(0));
- EXPECT_EQ(2, a.GetElement(1));
-
- // Inserts at the end of a Vector with more than one elements.
- a.Insert(3, a.size());
- ASSERT_EQ(3, a.size());
- EXPECT_EQ(1, a.GetElement(0));
- EXPECT_EQ(2, a.GetElement(1));
- EXPECT_EQ(3, a.GetElement(2));
-
- // Inserts in the middle of a Vector.
- a.Insert(4, 1);
- ASSERT_EQ(4, a.size());
- EXPECT_EQ(1, a.GetElement(0));
- EXPECT_EQ(4, a.GetElement(1));
- EXPECT_EQ(2, a.GetElement(2));
- EXPECT_EQ(3, a.GetElement(3));
-}
-
-// Tests Vector::GetElementOr().
-TEST(VectorTest, GetElementOr) {
- Vector<char> a;
- EXPECT_EQ('x', a.GetElementOr(0, 'x'));
-
- a.PushBack('a');
- a.PushBack('b');
- EXPECT_EQ('a', a.GetElementOr(0, 'x'));
- EXPECT_EQ('b', a.GetElementOr(1, 'x'));
- EXPECT_EQ('x', a.GetElementOr(-2, 'x'));
- EXPECT_EQ('x', a.GetElementOr(2, 'x'));
-}
-
-TEST(VectorTest, Swap) {
- Vector<int> a;
- a.PushBack(0);
- a.PushBack(1);
- a.PushBack(2);
-
- // Swaps an element with itself.
- a.Swap(0, 0);
- ASSERT_EQ(0, a.GetElement(0));
- ASSERT_EQ(1, a.GetElement(1));
- ASSERT_EQ(2, a.GetElement(2));
-
- // Swaps two different elements where the indices go up.
- a.Swap(0, 1);
- ASSERT_EQ(1, a.GetElement(0));
- ASSERT_EQ(0, a.GetElement(1));
- ASSERT_EQ(2, a.GetElement(2));
-
- // Swaps two different elements where the indices go down.
- a.Swap(2, 0);
- ASSERT_EQ(2, a.GetElement(0));
- ASSERT_EQ(0, a.GetElement(1));
- ASSERT_EQ(1, a.GetElement(2));
-}
-
-TEST(VectorTest, Clone) {
- // Clones an empty Vector.
- Vector<int> a;
- scoped_ptr<Vector<int> > empty(a.Clone());
- EXPECT_EQ(0, empty->size());
-
- // Clones a singleton.
- a.PushBack(42);
- scoped_ptr<Vector<int> > singleton(a.Clone());
- ASSERT_EQ(1, singleton->size());
- EXPECT_EQ(42, singleton->GetElement(0));
-
- // Clones a Vector with more elements.
- a.PushBack(43);
- a.PushBack(44);
- scoped_ptr<Vector<int> > big(a.Clone());
- ASSERT_EQ(3, big->size());
- EXPECT_EQ(42, big->GetElement(0));
- EXPECT_EQ(43, big->GetElement(1));
- EXPECT_EQ(44, big->GetElement(2));
-}
-
-// Tests Vector::Erase().
-TEST(VectorDeathTest, Erase) {
- Vector<int> a;
-
- // Tests erasing from an empty vector.
- EXPECT_DEATH_IF_SUPPORTED(
- a.Erase(0),
- "Invalid Vector index 0: must be in range \\[0, -1\\]\\.");
+// Tests CountIf().
- // Tests erasing from a singleton vector.
- a.PushBack(0);
+static bool IsPositive(int n) { return n > 0; }
- a.Erase(0);
- EXPECT_EQ(0, a.size());
+TEST(ContainerUtilityTest, CountIf) {
+ std::vector<int> v;
+ EXPECT_EQ(0, CountIf(v, IsPositive)); // Works for an empty container.
- // Tests Erase parameters beyond the bounds of the vector.
- Vector<int> a1;
- a1.PushBack(0);
- a1.PushBack(1);
- a1.PushBack(2);
+ v.push_back(-1);
+ v.push_back(0);
+ EXPECT_EQ(0, CountIf(v, IsPositive)); // Works when no value satisfies.
- EXPECT_DEATH_IF_SUPPORTED(
- a1.Erase(3),
- "Invalid Vector index 3: must be in range \\[0, 2\\]\\.");
- EXPECT_DEATH_IF_SUPPORTED(
- a1.Erase(-1),
- "Invalid Vector index -1: must be in range \\[0, 2\\]\\.");
-
- // Tests erasing at the end of the vector.
- Vector<int> a2;
- a2.PushBack(0);
- a2.PushBack(1);
- a2.PushBack(2);
-
- a2.Erase(2);
- ASSERT_EQ(2, a2.size());
- EXPECT_EQ(0, a2.GetElement(0));
- EXPECT_EQ(1, a2.GetElement(1));
-
- // Tests erasing in the middle of the vector.
- Vector<int> a3;
- a3.PushBack(0);
- a3.PushBack(1);
- a3.PushBack(2);
-
- a3.Erase(1);
- ASSERT_EQ(2, a3.size());
- EXPECT_EQ(0, a3.GetElement(0));
- EXPECT_EQ(2, a3.GetElement(1));
-
- // Tests erasing at the beginning of the vector.
- Vector<int> a4;
- a4.PushBack(0);
- a4.PushBack(1);
- a4.PushBack(2);
-
- a4.Erase(0);
- ASSERT_EQ(2, a4.size());
- EXPECT_EQ(1, a4.GetElement(0));
- EXPECT_EQ(2, a4.GetElement(1));
-}
-
-// Tests the GetElement accessor.
-TEST(VectorDeathTest, GetElement) {
- Vector<int> a;
- a.PushBack(0);
- a.PushBack(1);
- a.PushBack(2);
- const Vector<int>& b = a;
-
- EXPECT_EQ(0, b.GetElement(0));
- EXPECT_EQ(1, b.GetElement(1));
- EXPECT_EQ(2, b.GetElement(2));
- EXPECT_DEATH_IF_SUPPORTED(
- b.GetElement(3),
- "Invalid Vector index 3: must be in range \\[0, 2\\]\\.");
- EXPECT_DEATH_IF_SUPPORTED(
- b.GetElement(-1),
- "Invalid Vector index -1: must be in range \\[0, 2\\]\\.");
+ v.push_back(2);
+ v.push_back(-10);
+ v.push_back(10);
+ EXPECT_EQ(2, CountIf(v, IsPositive));
}
-// Tests the GetMutableElement accessor.
-TEST(VectorDeathTest, GetMutableElement) {
- Vector<int> a;
- a.PushBack(0);
- a.PushBack(1);
- a.PushBack(2);
+// Tests ForEach().
- EXPECT_EQ(0, a.GetMutableElement(0));
- EXPECT_EQ(1, a.GetMutableElement(1));
- EXPECT_EQ(2, a.GetMutableElement(2));
+static int g_sum = 0;
+static void Accumulate(int n) { g_sum += n; }
- a.GetMutableElement(0) = 42;
- EXPECT_EQ(42, a.GetMutableElement(0));
- EXPECT_EQ(1, a.GetMutableElement(1));
- EXPECT_EQ(2, a.GetMutableElement(2));
+TEST(ContainerUtilityTest, ForEach) {
+ std::vector<int> v;
+ g_sum = 0;
+ ForEach(v, Accumulate);
+ EXPECT_EQ(0, g_sum); // Works for an empty container.
- EXPECT_DEATH_IF_SUPPORTED(
- a.GetMutableElement(3),
- "Invalid Vector index 3: must be in range \\[0, 2\\]\\.");
- EXPECT_DEATH_IF_SUPPORTED(
- a.GetMutableElement(-1),
- "Invalid Vector index -1: must be in range \\[0, 2\\]\\.");
+ g_sum = 0;
+ v.push_back(1);
+ ForEach(v, Accumulate);
+ EXPECT_EQ(1, g_sum); // Works for a container with one element.
+
+ g_sum = 0;
+ v.push_back(20);
+ v.push_back(300);
+ ForEach(v, Accumulate);
+ EXPECT_EQ(321, g_sum);
}
-TEST(VectorDeathTest, Swap) {
- Vector<int> a;
- a.PushBack(0);
- a.PushBack(1);
- a.PushBack(2);
+// Tests GetElementOr().
+TEST(ContainerUtilityTest, GetElementOr) {
+ std::vector<char> a;
+ EXPECT_EQ('x', GetElementOr(a, 0, 'x'));
- EXPECT_DEATH_IF_SUPPORTED(
- a.Swap(-1, 1),
- "Invalid first swap element -1: must be in range \\[0, 2\\]");
- EXPECT_DEATH_IF_SUPPORTED(
- a.Swap(3, 1),
- "Invalid first swap element 3: must be in range \\[0, 2\\]");
- EXPECT_DEATH_IF_SUPPORTED(
- a.Swap(1, -1),
- "Invalid second swap element -1: must be in range \\[0, 2\\]");
- EXPECT_DEATH_IF_SUPPORTED(
- a.Swap(1, 3),
- "Invalid second swap element 3: must be in range \\[0, 2\\]");
+ a.push_back('a');
+ a.push_back('b');
+ EXPECT_EQ('a', GetElementOr(a, 0, 'x'));
+ EXPECT_EQ('b', GetElementOr(a, 1, 'x'));
+ EXPECT_EQ('x', GetElementOr(a, -2, 'x'));
+ EXPECT_EQ('x', GetElementOr(a, 2, 'x'));
}
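For quick orientation, a minimal sketch (not part of the patch) of how the new testing::internal helpers compose on a plain std::vector, reusing only the CountIf/ForEach/GetElementOr signatures and the IsPositive/Accumulate helpers exercised in the tests above:

// Illustrative sketch only -- mirrors the call patterns verified above.
TEST(ContainerUtilitySketch, ComposesOnStdVector) {
  std::vector<int> values;
  values.push_back(-1);
  values.push_back(4);
  values.push_back(7);
  EXPECT_EQ(2, CountIf(values, IsPositive));  // counts elements satisfying the predicate
  g_sum = 0;
  ForEach(values, Accumulate);                // applies Accumulate to every element
  EXPECT_EQ(10, g_sum);                       // -1 + 4 + 7
  EXPECT_EQ(0, GetElementOr(values, 9, 0));   // out-of-range index falls back to the default
}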
-TEST(VectorDeathTest, ShuffleRange) {
- Vector<int> a;
- a.PushBack(0);
- a.PushBack(1);
- a.PushBack(2);
+TEST(ContainerUtilityDeathTest, ShuffleRange) {
+ std::vector<int> a;
+ a.push_back(0);
+ a.push_back(1);
+ a.push_back(2);
testing::internal::Random random(1);
EXPECT_DEATH_IF_SUPPORTED(
- a.ShuffleRange(&random, -1, 1),
+ ShuffleRange(&random, -1, 1, &a),
"Invalid shuffle range start -1: must be in range \\[0, 3\\]");
EXPECT_DEATH_IF_SUPPORTED(
- a.ShuffleRange(&random, 4, 4),
+ ShuffleRange(&random, 4, 4, &a),
"Invalid shuffle range start 4: must be in range \\[0, 3\\]");
EXPECT_DEATH_IF_SUPPORTED(
- a.ShuffleRange(&random, 3, 2),
+ ShuffleRange(&random, 3, 2, &a),
"Invalid shuffle range finish 2: must be in range \\[3, 3\\]");
EXPECT_DEATH_IF_SUPPORTED(
- a.ShuffleRange(&random, 3, 4),
+ ShuffleRange(&random, 3, 4, &a),
"Invalid shuffle range finish 4: must be in range \\[3, 3\\]");
}
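For completeness, the non-death call pattern of the new free functions, again as an editorial sketch based only on the signatures exercised above:

// Illustrative sketch only.
std::vector<int> ids;
for (int i = 0; i < 5; i++)
  ids.push_back(i);
testing::internal::Random rng(42);
ShuffleRange(&rng, 1, 4, &ids);  // permutes ids[1..3]; ids[0] and ids[4] stay in place
Shuffle(&rng, &ids);             // permutes the entire vector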
@@ -889,18 +633,18 @@ class VectorShuffleTest : public Test {
VectorShuffleTest() : random_(1) {
for (int i = 0; i < kVectorSize; i++) {
- vector_.PushBack(i);
+ vector_.push_back(i);
}
}
static bool VectorIsCorrupt(const TestingVector& vector) {
- if (kVectorSize != vector.size()) {
+ if (kVectorSize != static_cast<int>(vector.size())) {
return true;
}
bool found_in_vector[kVectorSize] = { false };
- for (int i = 0; i < vector.size(); i++) {
- const int e = vector.GetElement(i);
+ for (size_t i = 0; i < vector.size(); i++) {
+ const int e = vector[i];
if (e < 0 || e >= kVectorSize || found_in_vector[e]) {
return true;
}
@@ -918,7 +662,7 @@ class VectorShuffleTest : public Test {
static bool RangeIsShuffled(const TestingVector& vector, int begin, int end) {
for (int i = begin; i < end; i++) {
- if (i != vector.GetElement(i)) {
+ if (i != vector[i]) {
return true;
}
}
@@ -931,7 +675,7 @@ class VectorShuffleTest : public Test {
}
static bool VectorIsShuffled(const TestingVector& vector) {
- return RangeIsShuffled(vector, 0, vector.size());
+ return RangeIsShuffled(vector, 0, static_cast<int>(vector.size()));
}
static bool VectorIsUnshuffled(const TestingVector& vector) {
@@ -946,39 +690,39 @@ const int VectorShuffleTest::kVectorSize;
TEST_F(VectorShuffleTest, HandlesEmptyRange) {
// Tests an empty range at the beginning...
- vector_.ShuffleRange(&random_, 0, 0);
+ ShuffleRange(&random_, 0, 0, &vector_);
ASSERT_PRED1(VectorIsNotCorrupt, vector_);
ASSERT_PRED1(VectorIsUnshuffled, vector_);
// ...in the middle...
- vector_.ShuffleRange(&random_, kVectorSize/2, kVectorSize/2);
+ ShuffleRange(&random_, kVectorSize/2, kVectorSize/2, &vector_);
ASSERT_PRED1(VectorIsNotCorrupt, vector_);
ASSERT_PRED1(VectorIsUnshuffled, vector_);
// ...at the end...
- vector_.ShuffleRange(&random_, kVectorSize - 1, kVectorSize - 1);
+ ShuffleRange(&random_, kVectorSize - 1, kVectorSize - 1, &vector_);
ASSERT_PRED1(VectorIsNotCorrupt, vector_);
ASSERT_PRED1(VectorIsUnshuffled, vector_);
// ...and past the end.
- vector_.ShuffleRange(&random_, kVectorSize, kVectorSize);
+ ShuffleRange(&random_, kVectorSize, kVectorSize, &vector_);
ASSERT_PRED1(VectorIsNotCorrupt, vector_);
ASSERT_PRED1(VectorIsUnshuffled, vector_);
}
TEST_F(VectorShuffleTest, HandlesRangeOfSizeOne) {
// Tests a size one range at the beginning...
- vector_.ShuffleRange(&random_, 0, 1);
+ ShuffleRange(&random_, 0, 1, &vector_);
ASSERT_PRED1(VectorIsNotCorrupt, vector_);
ASSERT_PRED1(VectorIsUnshuffled, vector_);
// ...in the middle...
- vector_.ShuffleRange(&random_, kVectorSize/2, kVectorSize/2 + 1);
+ ShuffleRange(&random_, kVectorSize/2, kVectorSize/2 + 1, &vector_);
ASSERT_PRED1(VectorIsNotCorrupt, vector_);
ASSERT_PRED1(VectorIsUnshuffled, vector_);
// ...and at the end.
- vector_.ShuffleRange(&random_, kVectorSize - 1, kVectorSize);
+ ShuffleRange(&random_, kVectorSize - 1, kVectorSize, &vector_);
ASSERT_PRED1(VectorIsNotCorrupt, vector_);
ASSERT_PRED1(VectorIsUnshuffled, vector_);
}
@@ -987,20 +731,20 @@ TEST_F(VectorShuffleTest, HandlesRangeOfSizeOne) {
// we can guarantee that the following "random" tests will succeed.
TEST_F(VectorShuffleTest, ShufflesEntireVector) {
- vector_.Shuffle(&random_);
+ Shuffle(&random_, &vector_);
ASSERT_PRED1(VectorIsNotCorrupt, vector_);
EXPECT_FALSE(VectorIsUnshuffled(vector_)) << vector_;
// Tests the first and last elements in particular to ensure that
// there are no off-by-one problems in our shuffle algorithm.
- EXPECT_NE(0, vector_.GetElement(0));
- EXPECT_NE(kVectorSize - 1, vector_.GetElement(kVectorSize - 1));
+ EXPECT_NE(0, vector_[0]);
+ EXPECT_NE(kVectorSize - 1, vector_[kVectorSize - 1]);
}
TEST_F(VectorShuffleTest, ShufflesStartOfVector) {
const int kRangeSize = kVectorSize/2;
- vector_.ShuffleRange(&random_, 0, kRangeSize);
+ ShuffleRange(&random_, 0, kRangeSize, &vector_);
ASSERT_PRED1(VectorIsNotCorrupt, vector_);
EXPECT_PRED3(RangeIsShuffled, vector_, 0, kRangeSize);
@@ -1009,7 +753,7 @@ TEST_F(VectorShuffleTest, ShufflesStartOfVector) {
TEST_F(VectorShuffleTest, ShufflesEndOfVector) {
const int kRangeSize = kVectorSize / 2;
- vector_.ShuffleRange(&random_, kRangeSize, kVectorSize);
+ ShuffleRange(&random_, kRangeSize, kVectorSize, &vector_);
ASSERT_PRED1(VectorIsNotCorrupt, vector_);
EXPECT_PRED3(RangeIsUnshuffled, vector_, 0, kRangeSize);
@@ -1018,7 +762,7 @@ TEST_F(VectorShuffleTest, ShufflesEndOfVector) {
TEST_F(VectorShuffleTest, ShufflesMiddleOfVector) {
int kRangeSize = kVectorSize/3;
- vector_.ShuffleRange(&random_, kRangeSize, 2*kRangeSize);
+ ShuffleRange(&random_, kRangeSize, 2*kRangeSize, &vector_);
ASSERT_PRED1(VectorIsNotCorrupt, vector_);
EXPECT_PRED3(RangeIsUnshuffled, vector_, 0, kRangeSize);
@@ -1029,20 +773,19 @@ TEST_F(VectorShuffleTest, ShufflesMiddleOfVector) {
TEST_F(VectorShuffleTest, ShufflesRepeatably) {
TestingVector vector2;
for (int i = 0; i < kVectorSize; i++) {
- vector2.PushBack(i);
+ vector2.push_back(i);
}
random_.Reseed(1234);
- vector_.Shuffle(&random_);
+ Shuffle(&random_, &vector_);
random_.Reseed(1234);
- vector2.Shuffle(&random_);
+ Shuffle(&random_, &vector2);
ASSERT_PRED1(VectorIsNotCorrupt, vector_);
ASSERT_PRED1(VectorIsNotCorrupt, vector2);
for (int i = 0; i < kVectorSize; i++) {
- EXPECT_EQ(vector_.GetElement(i), vector2.GetElement(i))
- << " where i is " << i;
+ EXPECT_EQ(vector_[i], vector2[i]) << " where i is " << i;
}
}
@@ -1107,8 +850,6 @@ TEST(StringTest, Constructors) {
EXPECT_EQ('c', s7.c_str()[3]);
}
-#if GTEST_HAS_STD_STRING
-
TEST(StringTest, ConvertsFromStdString) {
// An empty std::string.
const std::string src1("");
@@ -1148,8 +889,6 @@ TEST(StringTest, ConvertsToStdString) {
EXPECT_EQ(std::string("x\0y", 3), dest3);
}
-#endif // GTEST_HAS_STD_STRING
-
#if GTEST_HAS_GLOBAL_STRING
TEST(StringTest, ConvertsFromGlobalString) {
@@ -1383,12 +1122,16 @@ TEST(StringTest, CanBeAssignedSelf) {
EXPECT_STREQ("hello", dest.c_str());
}
+// Sun Studio < 12 incorrectly rejects this code due to an overloading
+// ambiguity.
+#if !(defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590)
// Tests streaming a String.
TEST(StringTest, Streams) {
EXPECT_EQ(StreamableToString(String()), "(null)");
EXPECT_EQ(StreamableToString(String("")), "");
EXPECT_EQ(StreamableToString(String("a\0b", 3)), "a\\0b");
}
+#endif
// Tests that String::Format() works.
TEST(StringTest, FormatWorks) {
@@ -1532,25 +1275,14 @@ TEST_F(ScopedFakeTestPartResultReporterTest, DeprecatedConstructor) {
EXPECT_EQ(1, results.size());
}
-#if GTEST_IS_THREADSAFE && GTEST_HAS_PTHREAD
+#if GTEST_IS_THREADSAFE
class ScopedFakeTestPartResultReporterWithThreadsTest
: public ScopedFakeTestPartResultReporterTest {
protected:
static void AddFailureInOtherThread(FailureMode failure) {
- pthread_t tid;
- pthread_create(&tid,
- NULL,
- ScopedFakeTestPartResultReporterWithThreadsTest::
- FailureThread,
- &failure);
- pthread_join(tid, NULL);
- }
- private:
- static void* FailureThread(void* attr) {
- FailureMode* failure = static_cast<FailureMode*>(attr);
- AddFailure(*failure);
- return NULL;
+ ThreadWithParam<FailureMode> thread(&AddFailure, failure, NULL);
+ thread.Join();
}
};
@@ -1573,7 +1305,7 @@ TEST_F(ScopedFakeTestPartResultReporterWithThreadsTest,
EXPECT_TRUE(results.GetTestPartResult(3).fatally_failed());
}
-#endif // GTEST_IS_THREADSAFE && GTEST_HAS_PTHREAD
+#endif // GTEST_IS_THREADSAFE
// Tests EXPECT_FATAL_FAILURE{,ON_ALL_THREADS}. Makes sure that they
// work even if the failure is generated in a called function rather than
@@ -1621,7 +1353,7 @@ void DoesNotAbortHelper(bool* aborted) {
}
#ifdef __BORLANDC__
-// Restores warnings after previous "#pragma option push" supressed them
+// Restores warnings after previous "#pragma option push" suppressed them.
#pragma option pop
#endif
@@ -1639,7 +1371,7 @@ static int global_var = 0;
#define GTEST_USE_UNPROTECTED_COMMA_ global_var++, global_var++
TEST_F(ExpectFatalFailureTest, AcceptsMacroThatExpandsToUnprotectedComma) {
-#ifndef __BORLANDC__
+#if !defined(__BORLANDC__) || __BORLANDC__ >= 0x600
// ICE's in C++Builder 2007.
EXPECT_FATAL_FAILURE({
GTEST_USE_UNPROTECTED_COMMA_;
@@ -1684,7 +1416,7 @@ TEST_F(ExpectNonfatalFailureTest, AcceptsMacroThatExpandsToUnprotectedComma) {
}, "");
}
-#if GTEST_IS_THREADSAFE && GTEST_HAS_PTHREAD
+#if GTEST_IS_THREADSAFE
typedef ScopedFakeTestPartResultReporterWithThreadsTest
ExpectFailureWithThreadsTest;
@@ -1699,7 +1431,7 @@ TEST_F(ExpectFailureWithThreadsTest, ExpectNonFatalFailureOnAllThreads) {
AddFailureInOtherThread(NONFATAL_FAILURE), "Expected non-fatal failure.");
}
-#endif // GTEST_IS_THREADSAFE && GTEST_HAS_PTHREAD
+#endif // GTEST_IS_THREADSAFE
// Tests the TestProperty class.
@@ -1717,61 +1449,12 @@ TEST(TestPropertyTest, SetValue) {
EXPECT_STREQ("value_2", property.value());
}
-// Tests the TestPartResult class.
-
-TEST(TestPartResultTest, ConstructorWorks) {
- Message message;
- message << "something is terribly wrong";
- message << static_cast<const char*>(testing::internal::kStackTraceMarker);
- message << "some unimportant stack trace";
-
- const TestPartResult result(TestPartResult::kNonFatalFailure,
- "some_file.cc",
- 42,
- message.GetString().c_str());
-
- EXPECT_EQ(TestPartResult::kNonFatalFailure, result.type());
- EXPECT_STREQ("some_file.cc", result.file_name());
- EXPECT_EQ(42, result.line_number());
- EXPECT_STREQ(message.GetString().c_str(), result.message());
- EXPECT_STREQ("something is terribly wrong", result.summary());
-}
-
-TEST(TestPartResultTest, ResultAccessorsWork) {
- const TestPartResult success(TestPartResult::kSuccess,
- "file.cc",
- 42,
- "message");
- EXPECT_TRUE(success.passed());
- EXPECT_FALSE(success.failed());
- EXPECT_FALSE(success.nonfatally_failed());
- EXPECT_FALSE(success.fatally_failed());
-
- const TestPartResult nonfatal_failure(TestPartResult::kNonFatalFailure,
- "file.cc",
- 42,
- "message");
- EXPECT_FALSE(nonfatal_failure.passed());
- EXPECT_TRUE(nonfatal_failure.failed());
- EXPECT_TRUE(nonfatal_failure.nonfatally_failed());
- EXPECT_FALSE(nonfatal_failure.fatally_failed());
-
- const TestPartResult fatal_failure(TestPartResult::kFatalFailure,
- "file.cc",
- 42,
- "message");
- EXPECT_FALSE(fatal_failure.passed());
- EXPECT_TRUE(fatal_failure.failed());
- EXPECT_FALSE(fatal_failure.nonfatally_failed());
- EXPECT_TRUE(fatal_failure.fatally_failed());
-}
-
// Tests the TestResult class
// The test fixture for testing TestResult.
class TestResultTest : public Test {
protected:
- typedef Vector<TestPartResult> TPRVector;
+ typedef std::vector<TestPartResult> TPRVector;
// We make use of 2 TestPartResult objects,
TestPartResult * pr1, * pr2;
@@ -1798,23 +1481,23 @@ class TestResultTest : public Test {
r2 = new TestResult();
// In order to test TestResult, we need to modify its internal
- // state, in particular the TestPartResult Vector it holds.
- // test_part_results() returns a const reference to this Vector.
+ // state, in particular the TestPartResult vector it holds.
+ // test_part_results() returns a const reference to this vector.
// We cast it to a non-const object s.t. it can be modified (yes,
// this is a hack).
- TPRVector* results1 = const_cast<Vector<TestPartResult> *>(
+ TPRVector* results1 = const_cast<TPRVector*>(
&TestResultAccessor::test_part_results(*r1));
- TPRVector* results2 = const_cast<Vector<TestPartResult> *>(
+ TPRVector* results2 = const_cast<TPRVector*>(
&TestResultAccessor::test_part_results(*r2));
// r0 is an empty TestResult.
// r1 contains a single SUCCESS TestPartResult.
- results1->PushBack(*pr1);
+ results1->push_back(*pr1);
// r2 contains a SUCCESS, and a FAILURE.
- results2->PushBack(*pr1);
- results2->PushBack(*pr2);
+ results2->push_back(*pr1);
+ results2->push_back(*pr2);
}
virtual void TearDown() {
@@ -1869,12 +1552,8 @@ typedef TestResultTest TestResultDeathTest;
TEST_F(TestResultDeathTest, GetTestPartResult) {
CompareTestPartResult(*pr1, r2->GetTestPartResult(0));
CompareTestPartResult(*pr2, r2->GetTestPartResult(1));
- EXPECT_DEATH_IF_SUPPORTED(
- r2->GetTestPartResult(2),
- "Invalid Vector index 2: must be in range \\[0, 1\\]\\.");
- EXPECT_DEATH_IF_SUPPORTED(
- r2->GetTestPartResult(-1),
- "Invalid Vector index -1: must be in range \\[0, 1\\]\\.");
+ EXPECT_DEATH_IF_SUPPORTED(r2->GetTestPartResult(2), "");
+ EXPECT_DEATH_IF_SUPPORTED(r2->GetTestPartResult(-1), "");
}
// Tests TestResult has no properties when none are added.
@@ -1956,12 +1635,8 @@ TEST(TestResultPropertyDeathTest, GetTestProperty) {
EXPECT_STREQ("key_3", fetched_property_3.key());
EXPECT_STREQ("3", fetched_property_3.value());
- EXPECT_DEATH_IF_SUPPORTED(
- test_result.GetTestProperty(3),
- "Invalid Vector index 3: must be in range \\[0, 2\\]\\.");
- EXPECT_DEATH_IF_SUPPORTED(
- test_result.GetTestProperty(-1),
- "Invalid Vector index -1: must be in range \\[0, 2\\]\\.");
+ EXPECT_DEATH_IF_SUPPORTED(test_result.GetTestProperty(3), "");
+ EXPECT_DEATH_IF_SUPPORTED(test_result.GetTestProperty(-1), "");
}
// When a property using a reserved key is supplied to this function, it tests
@@ -2022,6 +1697,7 @@ class GTestFlagSaverTest : public Test {
GTEST_FLAG(random_seed) = 0;
GTEST_FLAG(repeat) = 1;
GTEST_FLAG(shuffle) = false;
+ GTEST_FLAG(stack_trace_depth) = kMaxStackTraceDepth;
GTEST_FLAG(throw_on_failure) = false;
}
@@ -2047,6 +1723,7 @@ class GTestFlagSaverTest : public Test {
EXPECT_EQ(0, GTEST_FLAG(random_seed));
EXPECT_EQ(1, GTEST_FLAG(repeat));
EXPECT_FALSE(GTEST_FLAG(shuffle));
+ EXPECT_EQ(kMaxStackTraceDepth, GTEST_FLAG(stack_trace_depth));
EXPECT_FALSE(GTEST_FLAG(throw_on_failure));
GTEST_FLAG(also_run_disabled_tests) = true;
@@ -2061,6 +1738,7 @@ class GTestFlagSaverTest : public Test {
GTEST_FLAG(random_seed) = 1;
GTEST_FLAG(repeat) = 100;
GTEST_FLAG(shuffle) = true;
+ GTEST_FLAG(stack_trace_depth) = 1;
GTEST_FLAG(throw_on_failure) = true;
}
private:
@@ -2091,7 +1769,7 @@ static void SetEnv(const char* name, const char* value) {
#if GTEST_OS_WINDOWS_MOBILE
// Environment variables are not supported on Windows CE.
return;
-#elif defined(__BORLANDC__)
+#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9)
// C++Builder's putenv only stores a pointer to its parameter; we have to
// ensure that the string remains valid as long as it might be needed.
// We use an std::map to do so.
@@ -2104,7 +1782,11 @@ static void SetEnv(const char* name, const char* value) {
prev_env = added_env[name];
}
added_env[name] = new String((Message() << name << "=" << value).GetString());
- putenv(added_env[name]->c_str());
+
+ // The standard signature of putenv accepts a 'char*' argument. Other
+ // implementations, like C++Builder's, accept a 'const char*'.
+ // We cast away the 'const' since that would work for both variants.
+ putenv(const_cast<char*>(added_env[name]->c_str()));
delete prev_env;
#elif GTEST_OS_WINDOWS // If we are on Windows proper.
_putenv((Message() << name << "=" << value).GetString().c_str());
@@ -2418,6 +2100,25 @@ AssertionResult AssertIsEven(const char* expr, int n) {
return AssertionFailure(msg);
}
+// A predicate function that returns AssertionResult for use in
+// EXPECT/ASSERT_TRUE/FALSE.
+AssertionResult ResultIsEven(int n) {
+ if (IsEven(n))
+ return AssertionSuccess() << n << " is even";
+ else
+ return AssertionFailure() << n << " is odd";
+}
+
+// A predicate function that returns AssertionResult but gives no
+// explanation why it succeeds. Needed for testing that
+// EXPECT/ASSERT_FALSE handles such functions correctly.
+AssertionResult ResultIsEvenNoExplanation(int n) {
+ if (IsEven(n))
+ return AssertionSuccess();
+ else
+ return AssertionFailure() << n << " is odd";
+}
+
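The AssertTrue/FalseWithAssertionResult and ExpectTrue/FalseWithAssertionResult tests further down exercise these predicates; as a sketch of the intended user-side call pattern (not part of the patch):

// Illustrative sketch only -- the real coverage is in the *WithAssertionResult tests below.
EXPECT_TRUE(ResultIsEven(2));   // passes; the streamed "2 is even" message is not printed
EXPECT_FALSE(ResultIsEven(3));  // passes
// EXPECT_TRUE(ResultIsEven(3)) would fail and report:
//   Value of: ResultIsEven(3)
//     Actual: false (3 is odd)
//   Expected: true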
// A predicate-formatter functor that asserts the argument is an even
// number.
struct AssertIsEvenFunctor {
@@ -2615,10 +2316,6 @@ TEST(PredTest, SingleEvaluationOnFailure) {
// Some helper functions for testing using overloaded/template
// functions with ASSERT_PREDn and EXPECT_PREDn.
-bool IsPositive(int n) {
- return n > 0;
-}
-
bool IsPositive(double x) {
return x > 0;
}
@@ -2841,8 +2538,6 @@ TEST(IsSubstringTest, GeneratesCorrectMessageForCString) {
"needle", "haystack").failure_message());
}
-#if GTEST_HAS_STD_STRING
-
// Tests that IsSubstring returns the correct result when the input
// argument type is ::std::string.
TEST(IsSubstringTest, ReturnsCorrectResultsForStdString) {
@@ -2850,8 +2545,6 @@ TEST(IsSubstringTest, ReturnsCorrectResultsForStdString) {
EXPECT_FALSE(IsSubstring("", "", "hello", std::string("world")));
}
-#endif // GTEST_HAS_STD_STRING
-
#if GTEST_HAS_STD_WSTRING
// Tests that IsSubstring returns the correct result when the input
// argument type is ::std::wstring.
@@ -2902,8 +2595,6 @@ TEST(IsNotSubstringTest, GeneratesCorrectMessageForWideCString) {
L"needle", L"two needles").failure_message());
}
-#if GTEST_HAS_STD_STRING
-
// Tests that IsNotSubstring returns the correct result when the input
// argument type is ::std::string.
TEST(IsNotSubstringTest, ReturnsCorrectResultsForStdString) {
@@ -2923,8 +2614,6 @@ TEST(IsNotSubstringTest, GeneratesCorrectMessageForStdString) {
::std::string("needle"), "two needles").failure_message());
}
-#endif // GTEST_HAS_STD_STRING
-
#if GTEST_HAS_STD_WSTRING
// Tests that IsNotSubstring returns the correct result when the input
@@ -3043,7 +2732,10 @@ TEST_F(FloatTest, AlmostZeros) {
// In C++Builder, names within local classes (such as used by
// EXPECT_FATAL_FAILURE) cannot be resolved against static members of the
// scoping class. Use a static local alias as a workaround.
- static const FloatTest::TestValues& v(this->values_);
+ // We use the assignment syntax since some compilers, like Sun Studio,
+ // don't allow initializing references using construction syntax
+ // (parentheses).
+ static const FloatTest::TestValues& v = this->values_;
EXPECT_FLOAT_EQ(0.0, v.close_to_positive_zero);
EXPECT_FLOAT_EQ(-0.0, v.close_to_negative_zero);
@@ -3095,7 +2787,10 @@ TEST_F(FloatTest, NaN) {
// In C++Builder, names within local classes (such as used by
// EXPECT_FATAL_FAILURE) cannot be resolved against static members of the
// scoping class. Use a static local alias as a workaround.
- static const FloatTest::TestValues& v(this->values_);
+ // We use the assignment syntax since some compilers, like Sun Studio,
+ // don't allow initializing references using construction syntax
+ // (parentheses).
+ static const FloatTest::TestValues& v = this->values_;
EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(v.nan1, v.nan1),
"v.nan1");
@@ -3130,9 +2825,9 @@ TEST_F(FloatTest, Commutative) {
TEST_F(FloatTest, EXPECT_NEAR) {
EXPECT_NEAR(-1.0f, -1.1f, 0.2f);
EXPECT_NEAR(2.0f, 3.0f, 1.0f);
- EXPECT_NONFATAL_FAILURE(EXPECT_NEAR(1.0f,1.2f, 0.1f), // NOLINT
- "The difference between 1.0f and 1.2f is 0.2, "
- "which exceeds 0.1f");
+ EXPECT_NONFATAL_FAILURE(EXPECT_NEAR(1.0f,1.5f, 0.25f), // NOLINT
+ "The difference between 1.0f and 1.5f is 0.5, "
+ "which exceeds 0.25f");
// To work around a bug in gcc 2.95.0, there is intentionally no
// space after the first comma in the previous line.
}
@@ -3141,9 +2836,9 @@ TEST_F(FloatTest, EXPECT_NEAR) {
TEST_F(FloatTest, ASSERT_NEAR) {
ASSERT_NEAR(-1.0f, -1.1f, 0.2f);
ASSERT_NEAR(2.0f, 3.0f, 1.0f);
- EXPECT_FATAL_FAILURE(ASSERT_NEAR(1.0f,1.2f, 0.1f), // NOLINT
- "The difference between 1.0f and 1.2f is 0.2, "
- "which exceeds 0.1f");
+ EXPECT_FATAL_FAILURE(ASSERT_NEAR(1.0f,1.5f, 0.25f), // NOLINT
+ "The difference between 1.0f and 1.5f is 0.5, "
+ "which exceeds 0.25f");
// To work around a bug in gcc 2.95.0, there is intentionally no
// space after the first comma in the previous line.
}
@@ -3210,7 +2905,10 @@ TEST_F(DoubleTest, AlmostZeros) {
// In C++Builder, names within local classes (such as used by
// EXPECT_FATAL_FAILURE) cannot be resolved against static members of the
// scoping class. Use a static local alias as a workaround.
- static const DoubleTest::TestValues& v(this->values_);
+ // We use the assignment syntax since some compilers, like Sun Studio,
+ // don't allow initializing references using construction syntax
+ // (parentheses).
+ static const DoubleTest::TestValues& v = this->values_;
EXPECT_DOUBLE_EQ(0.0, v.close_to_positive_zero);
EXPECT_DOUBLE_EQ(-0.0, v.close_to_negative_zero);
@@ -3260,7 +2958,10 @@ TEST_F(DoubleTest, NaN) {
// In C++Builder, names within local classes (such as used by
// EXPECT_FATAL_FAILURE) cannot be resolved against static members of the
// scoping class. Use a static local alias as a workaround.
- static const DoubleTest::TestValues& v(this->values_);
+ // We use the assignment syntax since some compilers, like Sun Studio,
+ // don't allow initializing references using construction syntax
+ // (parentheses).
+ static const DoubleTest::TestValues& v = this->values_;
// Nokia's STLport crashes if we try to output infinity or NaN.
EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(v.nan1, v.nan1),
@@ -3296,9 +2997,9 @@ TEST_F(DoubleTest, Commutative) {
TEST_F(DoubleTest, EXPECT_NEAR) {
EXPECT_NEAR(-1.0, -1.1, 0.2);
EXPECT_NEAR(2.0, 3.0, 1.0);
- EXPECT_NONFATAL_FAILURE(EXPECT_NEAR(1.0, 1.2, 0.1), // NOLINT
- "The difference between 1.0 and 1.2 is 0.2, "
- "which exceeds 0.1");
+ EXPECT_NONFATAL_FAILURE(EXPECT_NEAR(1.0, 1.5, 0.25), // NOLINT
+ "The difference between 1.0 and 1.5 is 0.5, "
+ "which exceeds 0.25");
// To work around a bug in gcc 2.95.0, there is intentionally no
// space after the first comma in the previous statement.
}
@@ -3307,9 +3008,9 @@ TEST_F(DoubleTest, EXPECT_NEAR) {
TEST_F(DoubleTest, ASSERT_NEAR) {
ASSERT_NEAR(-1.0, -1.1, 0.2);
ASSERT_NEAR(2.0, 3.0, 1.0);
- EXPECT_FATAL_FAILURE(ASSERT_NEAR(1.0, 1.2, 0.1), // NOLINT
- "The difference between 1.0 and 1.2 is 0.2, "
- "which exceeds 0.1");
+ EXPECT_FATAL_FAILURE(ASSERT_NEAR(1.0, 1.5, 0.25), // NOLINT
+ "The difference between 1.0 and 1.5 is 0.5, "
+ "which exceeds 0.25");
// To work around a bug in gcc 2.95.0, there is intentionally no
// space after the first comma in the previous statement.
}
@@ -3786,6 +3487,23 @@ TEST(AssertionTest, ASSERT_TRUE) {
"2 < 1");
}
+// Tests ASSERT_TRUE(predicate) for predicates returning AssertionResult.
+TEST(AssertionTest, AssertTrueWithAssertionResult) {
+ ASSERT_TRUE(ResultIsEven(2));
+#if !defined(__BORLANDC__) || __BORLANDC__ >= 0x600
+ // ICE's in C++Builder 2007.
+ EXPECT_FATAL_FAILURE(ASSERT_TRUE(ResultIsEven(3)),
+ "Value of: ResultIsEven(3)\n"
+ " Actual: false (3 is odd)\n"
+ "Expected: true");
+#endif
+ ASSERT_TRUE(ResultIsEvenNoExplanation(2));
+ EXPECT_FATAL_FAILURE(ASSERT_TRUE(ResultIsEvenNoExplanation(3)),
+ "Value of: ResultIsEvenNoExplanation(3)\n"
+ " Actual: false (3 is odd)\n"
+ "Expected: true");
+}
+
// Tests ASSERT_FALSE.
TEST(AssertionTest, ASSERT_FALSE) {
ASSERT_FALSE(2 < 1); // NOLINT
@@ -3795,6 +3513,23 @@ TEST(AssertionTest, ASSERT_FALSE) {
"Expected: false");
}
+// Tests ASSERT_FALSE(predicate) for predicates returning AssertionResult.
+TEST(AssertionTest, AssertFalseWithAssertionResult) {
+ ASSERT_FALSE(ResultIsEven(3));
+#if !defined(__BORLANDC__) || __BORLANDC__ >= 0x600
+ // ICE's in C++Builder 2007.
+ EXPECT_FATAL_FAILURE(ASSERT_FALSE(ResultIsEven(2)),
+ "Value of: ResultIsEven(2)\n"
+ " Actual: true (2 is even)\n"
+ "Expected: false");
+#endif
+ ASSERT_FALSE(ResultIsEvenNoExplanation(3));
+ EXPECT_FATAL_FAILURE(ASSERT_FALSE(ResultIsEvenNoExplanation(2)),
+ "Value of: ResultIsEvenNoExplanation(2)\n"
+ " Actual: true\n"
+ "Expected: false");
+}
+
#ifdef __BORLANDC__
// Restores warnings after previous "#pragma option push" supressed them
#pragma option pop
@@ -3822,10 +3557,7 @@ TEST(AssertionTest, ASSERT_EQ) {
}
// Tests ASSERT_EQ(NULL, pointer).
-#if !GTEST_OS_SYMBIAN
-// The NULL-detection template magic fails to compile with
-// the Nokia compiler and crashes the ARM compiler, hence
-// not testing on Symbian.
+#if GTEST_CAN_COMPARE_NULL
TEST(AssertionTest, ASSERT_EQ_NULL) {
// A success.
const char* p = NULL;
@@ -3840,7 +3572,7 @@ TEST(AssertionTest, ASSERT_EQ_NULL) {
EXPECT_FATAL_FAILURE(ASSERT_EQ(NULL, &n),
"Value of: &n\n");
}
-#endif // !GTEST_OS_SYMBIAN
+#endif // GTEST_CAN_COMPARE_NULL
// Tests ASSERT_EQ(0, non_pointer). Since the literal 0 can be
// treated as a null pointer by the compiler, we need to make sure
@@ -3902,13 +3634,15 @@ void ThrowNothing() {}
// Tests ASSERT_THROW.
TEST(AssertionTest, ASSERT_THROW) {
ASSERT_THROW(ThrowAnInteger(), int);
-#if !defined(__BORLANDC__) || __BORLANDC__ >= 0x600 || defined(_DEBUG)
- // ICE's in C++Builder 2007 (Release build).
+
+#ifndef __BORLANDC__
+ // ICE's in C++Builder 2007 and 2009.
EXPECT_FATAL_FAILURE(
ASSERT_THROW(ThrowAnInteger(), bool),
"Expected: ThrowAnInteger() throws an exception of type bool.\n"
" Actual: it throws a different type.");
#endif
+
EXPECT_FATAL_FAILURE(
ASSERT_THROW(ThrowNothing(), bool),
"Expected: ThrowNothing() throws an exception of type bool.\n"
@@ -3955,7 +3689,7 @@ TEST(AssertionTest, NonFixtureSubroutine) {
// An uncopyable class.
class Uncopyable {
public:
- explicit Uncopyable(int value) : value_(value) {}
+ explicit Uncopyable(int a_value) : value_(a_value) {}
int value() const { return value_; }
bool operator==(const Uncopyable& rhs) const {
@@ -4017,7 +3751,8 @@ TEST(AssertionTest, ExpectWorksWithUncopyableObject) {
// The version of gcc used in XCode 2.2 has a bug and doesn't allow
// anonymous enums in assertions. Therefore the following test is not
// done on Mac.
-#if !GTEST_OS_MAC
+// Sun Studio also rejects this code.
+#if !GTEST_OS_MAC && !defined(__SUNPRO_CC)
// Tests using assertions with anonymous enums.
enum {
@@ -4062,7 +3797,7 @@ TEST(AssertionTest, AnonymousEnum) {
"Value of: CASE_B");
}
-#endif // !GTEST_OS_MAC
+#endif // !GTEST_OS_MAC && !defined(__SUNPRO_CC)
#if GTEST_OS_WINDOWS
@@ -4336,6 +4071,20 @@ TEST(ExpectTest, EXPECT_TRUE) {
"2 > 3");
}
+// Tests EXPECT_TRUE(predicate) for predicates returning AssertionResult.
+TEST(ExpectTest, ExpectTrueWithAssertionResult) {
+ EXPECT_TRUE(ResultIsEven(2));
+ EXPECT_NONFATAL_FAILURE(EXPECT_TRUE(ResultIsEven(3)),
+ "Value of: ResultIsEven(3)\n"
+ " Actual: false (3 is odd)\n"
+ "Expected: true");
+ EXPECT_TRUE(ResultIsEvenNoExplanation(2));
+ EXPECT_NONFATAL_FAILURE(EXPECT_TRUE(ResultIsEvenNoExplanation(3)),
+ "Value of: ResultIsEvenNoExplanation(3)\n"
+ " Actual: false (3 is odd)\n"
+ "Expected: true");
+}
+
// Tests EXPECT_FALSE.
TEST(ExpectTest, EXPECT_FALSE) {
EXPECT_FALSE(2 < 1); // NOLINT
@@ -4347,6 +4096,20 @@ TEST(ExpectTest, EXPECT_FALSE) {
"2 < 3");
}
+// Tests EXPECT_FALSE(predicate) for predicates returning AssertionResult.
+TEST(ExpectTest, ExpectFalseWithAssertionResult) {
+ EXPECT_FALSE(ResultIsEven(3));
+ EXPECT_NONFATAL_FAILURE(EXPECT_FALSE(ResultIsEven(2)),
+ "Value of: ResultIsEven(2)\n"
+ " Actual: true (2 is even)\n"
+ "Expected: false");
+ EXPECT_FALSE(ResultIsEvenNoExplanation(3));
+ EXPECT_NONFATAL_FAILURE(EXPECT_FALSE(ResultIsEvenNoExplanation(2)),
+ "Value of: ResultIsEvenNoExplanation(2)\n"
+ " Actual: true\n"
+ "Expected: false");
+}
+
#ifdef __BORLANDC__
// Restores warnings after previous "#pragma option push" supressed them
#pragma option pop
@@ -4375,7 +4138,7 @@ TEST(ExpectTest, EXPECT_EQ_Double) {
"5.1");
}
-#if !GTEST_OS_SYMBIAN
+#if GTEST_CAN_COMPARE_NULL
// Tests EXPECT_EQ(NULL, pointer).
TEST(ExpectTest, EXPECT_EQ_NULL) {
// A success.
@@ -4391,7 +4154,7 @@ TEST(ExpectTest, EXPECT_EQ_NULL) {
EXPECT_NONFATAL_FAILURE(EXPECT_EQ(NULL, &n),
"Value of: &n\n");
}
-#endif // !GTEST_OS_SYMBIAN
+#endif // GTEST_CAN_COMPARE_NULL
// Tests EXPECT_EQ(0, non_pointer). Since the literal 0 can be
// treated as a null pointer by the compiler, we need to make sure
@@ -4542,7 +4305,6 @@ TEST(StreamableToStringTest, NullCString) {
// Tests using streamable values as assertion messages.
-#if GTEST_HAS_STD_STRING
// Tests using std::string as an assertion message.
TEST(StreamableTest, string) {
static const std::string str(
@@ -4563,8 +4325,6 @@ TEST(StreamableTest, stringWithEmbeddedNUL) {
"Here's a NUL\\0 and some more string");
}
-#endif // GTEST_HAS_STD_STRING
-
// Tests that we can output a NUL char.
TEST(StreamableTest, NULChar) {
EXPECT_FATAL_FAILURE({ // NOLINT
@@ -4687,7 +4447,6 @@ TEST(EqAssertionTest, WideChar) {
"Value of: wchar");
}
-#if GTEST_HAS_STD_STRING
// Tests using ::std::string values in {EXPECT|ASSERT}_EQ.
TEST(EqAssertionTest, StdString) {
// Compares a const char* to an std::string that has identical
@@ -4718,8 +4477,6 @@ TEST(EqAssertionTest, StdString) {
" Actual: \"A \\0 in the middle\"");
}
-#endif // GTEST_HAS_STD_STRING
-
#if GTEST_HAS_STD_WSTRING
// Tests using ::std::wstring values in {EXPECT|ASSERT}_EQ.
@@ -4952,11 +4709,68 @@ TEST_F(TestLifeCycleTest, Test2) {
} // namespace
+// Tests that the copy constructor works when it is NOT optimized away by
+// the compiler.
+TEST(AssertionResultTest, CopyConstructorWorksWhenNotOptimied) {
+ // Checks that the copy constructor doesn't try to dereference NULL pointers
+ // in the source object.
+ AssertionResult r1 = AssertionSuccess();
+ AssertionResult r2 = r1;
+ // The following line is added to prevent the compiler from optimizing
+ // away the constructor call.
+ r1 << "abc";
+
+ AssertionResult r3 = r1;
+ EXPECT_EQ(static_cast<bool>(r3), static_cast<bool>(r1));
+ EXPECT_STREQ("abc", r1.message());
+}
+
+// Tests that AssertionSuccess and AssertionFailure construct
+// AssertionResult objects as expected.
+TEST(AssertionResultTest, ConstructionWorks) {
+ AssertionResult r1 = AssertionSuccess();
+ EXPECT_TRUE(r1);
+ EXPECT_STREQ("", r1.message());
+
+ AssertionResult r2 = AssertionSuccess() << "abc";
+ EXPECT_TRUE(r2);
+ EXPECT_STREQ("abc", r2.message());
+
+ AssertionResult r3 = AssertionFailure();
+ EXPECT_FALSE(r3);
+ EXPECT_STREQ("", r3.message());
+
+ AssertionResult r4 = AssertionFailure() << "def";
+ EXPECT_FALSE(r4);
+ EXPECT_STREQ("def", r4.message());
+
+ AssertionResult r5 = AssertionFailure(Message() << "ghi");
+ EXPECT_FALSE(r5);
+ EXPECT_STREQ("ghi", r5.message());
+}
+
+// Tests that the negation flips the predicate result but keeps the message.
+TEST(AssertionResultTest, NegationWorks) {
+ AssertionResult r1 = AssertionSuccess() << "abc";
+ EXPECT_FALSE(!r1);
+ EXPECT_STREQ("abc", (!r1).message());
+
+ AssertionResult r2 = AssertionFailure() << "def";
+ EXPECT_TRUE(!r2);
+ EXPECT_STREQ("def", (!r2).message());
+}
+
+TEST(AssertionResultTest, StreamingWorks) {
+ AssertionResult r = AssertionSuccess();
+ r << "abc" << 'd' << 0 << true;
+ EXPECT_STREQ("abcd0true", r.message());
+}
+
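A user-side predicate combining the construction, streaming, and negation behaviour verified above might look like this sketch (IsNonEmpty is hypothetical, not part of gtest or this patch):

// Hypothetical user predicate -- illustrative only.
AssertionResult IsNonEmpty(const char* s) {
  if (s != NULL && *s != '\0')
    return AssertionSuccess() << "\"" << s << "\" is non-empty";
  return AssertionFailure() << "the string is NULL or empty";
}
// EXPECT_TRUE(IsNonEmpty("abc")) passes; EXPECT_FALSE(IsNonEmpty("abc")) fails and
// reports the streamed message; !IsNonEmpty("abc") flips the result but keeps the message.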
// Tests streaming a user type whose definition and operator << are
// both in the global namespace.
class Base {
public:
- explicit Base(int x) : x_(x) {}
+ explicit Base(int an_x) : x_(an_x) {}
int x() const { return x_; }
private:
int x_;
@@ -4983,7 +4797,7 @@ TEST(MessageTest, CanStreamUserTypeInGlobalNameSpace) {
namespace {
class MyTypeInUnnamedNameSpace : public Base {
public:
- explicit MyTypeInUnnamedNameSpace(int x): Base(x) {}
+ explicit MyTypeInUnnamedNameSpace(int an_x): Base(an_x) {}
};
std::ostream& operator<<(std::ostream& os,
const MyTypeInUnnamedNameSpace& val) {
@@ -5008,7 +4822,7 @@ TEST(MessageTest, CanStreamUserTypeInUnnamedNameSpace) {
namespace namespace1 {
class MyTypeInNameSpace1 : public Base {
public:
- explicit MyTypeInNameSpace1(int x): Base(x) {}
+ explicit MyTypeInNameSpace1(int an_x): Base(an_x) {}
};
std::ostream& operator<<(std::ostream& os,
const MyTypeInNameSpace1& val) {
@@ -5033,7 +4847,7 @@ TEST(MessageTest, CanStreamUserTypeInUserNameSpace) {
namespace namespace2 {
class MyTypeInNameSpace2 : public ::Base {
public:
- explicit MyTypeInNameSpace2(int x): Base(x) {}
+ explicit MyTypeInNameSpace2(int an_x): Base(an_x) {}
};
} // namespace namespace2
std::ostream& operator<<(std::ostream& os,
@@ -5215,6 +5029,7 @@ struct Flags {
random_seed(0),
repeat(1),
shuffle(false),
+ stack_trace_depth(kMaxStackTraceDepth),
throw_on_failure(false) {}
// Factory methods.
@@ -5307,6 +5122,14 @@ struct Flags {
return flags;
}
+ // Creates a Flags struct where the GTEST_FLAG(stack_trace_depth) flag has
+ // the given value.
+ static Flags StackTraceDepth(Int32 stack_trace_depth) {
+ Flags flags;
+ flags.stack_trace_depth = stack_trace_depth;
+ return flags;
+ }
+
// Creates a Flags struct where the gtest_throw_on_failure flag has
// the given value.
static Flags ThrowOnFailure(bool throw_on_failure) {
@@ -5327,6 +5150,7 @@ struct Flags {
Int32 random_seed;
Int32 repeat;
bool shuffle;
+ Int32 stack_trace_depth;
bool throw_on_failure;
};
@@ -5346,6 +5170,7 @@ class InitGoogleTestTest : public Test {
GTEST_FLAG(random_seed) = 0;
GTEST_FLAG(repeat) = 1;
GTEST_FLAG(shuffle) = false;
+ GTEST_FLAG(stack_trace_depth) = kMaxStackTraceDepth;
GTEST_FLAG(throw_on_failure) = false;
}
@@ -5375,6 +5200,7 @@ class InitGoogleTestTest : public Test {
EXPECT_EQ(expected.repeat, GTEST_FLAG(repeat));
EXPECT_EQ(expected.shuffle, GTEST_FLAG(shuffle));
EXPECT_EQ(expected.throw_on_failure, GTEST_FLAG(throw_on_failure));
+ EXPECT_EQ(expected.stack_trace_depth, GTEST_FLAG(stack_trace_depth));
}
// Parses a command line (specified by argc1 and argv1), then
@@ -5383,23 +5209,52 @@ class InitGoogleTestTest : public Test {
template <typename CharType>
static void TestParsingFlags(int argc1, const CharType** argv1,
int argc2, const CharType** argv2,
- const Flags& expected) {
+ const Flags& expected, bool should_print_help) {
+ const bool saved_help_flag = ::testing::internal::g_help_flag;
+ ::testing::internal::g_help_flag = false;
+
+#if GTEST_HAS_STREAM_REDIRECTION_
+ CaptureStdout();
+#endif // GTEST_HAS_STREAM_REDIRECTION_
+
// Parses the command line.
internal::ParseGoogleTestFlagsOnly(&argc1, const_cast<CharType**>(argv1));
+#if GTEST_HAS_STREAM_REDIRECTION_
+ const String captured_stdout = GetCapturedStdout();
+#endif // GTEST_HAS_STREAM_REDIRECTION_
+
// Verifies the flag values.
CheckFlags(expected);
// Verifies that the recognized flags are removed from the command
// line.
AssertStringArrayEq(argc1 + 1, argv1, argc2 + 1, argv2);
+
+ // ParseGoogleTestFlagsOnly should set g_help_flag and print the help
+ // message only when expected (never for recognized flags with valid values).
+ EXPECT_EQ(should_print_help, ::testing::internal::g_help_flag);
+
+#if GTEST_HAS_STREAM_REDIRECTION_
+ const char* const expected_help_fragment =
+ "This program contains tests written using";
+ if (should_print_help) {
+ EXPECT_PRED_FORMAT2(IsSubstring, expected_help_fragment, captured_stdout);
+ } else {
+ EXPECT_PRED_FORMAT2(IsNotSubstring,
+ expected_help_fragment, captured_stdout);
+ }
+#endif // GTEST_HAS_STREAM_REDIRECTION_
+
+ ::testing::internal::g_help_flag = saved_help_flag;
}
// This macro wraps TestParsingFlags s.t. the user doesn't need
// to specify the array sizes.
-#define GTEST_TEST_PARSING_FLAGS_(argv1, argv2, expected) \
+#define GTEST_TEST_PARSING_FLAGS_(argv1, argv2, expected, should_print_help) \
TestParsingFlags(sizeof(argv1)/sizeof(*argv1) - 1, argv1, \
- sizeof(argv2)/sizeof(*argv2) - 1, argv2, expected)
+ sizeof(argv2)/sizeof(*argv2) - 1, argv2, \
+ expected, should_print_help)
};
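The array-size arithmetic the macro hides is the usual sizeof idiom for NULL-terminated argv arrays; as a standalone sketch (not part of the patch):

// Illustrative sketch only.
const char* argv[] = { "foo", "--gtest_repeat=2", NULL };
const int argc = sizeof(argv)/sizeof(*argv) - 1;  // three elements minus the trailing NULL -> argc == 2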
// Tests parsing an empty command line.
@@ -5412,7 +5267,7 @@ TEST_F(InitGoogleTestTest, Empty) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags());
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags(), false);
}
// Tests parsing a command line that has no flag.
@@ -5427,7 +5282,7 @@ TEST_F(InitGoogleTestTest, NoFlag) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags());
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags(), false);
}
// Tests parsing a bad --gtest_filter flag.
@@ -5444,7 +5299,7 @@ TEST_F(InitGoogleTestTest, FilterBad) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Filter(""));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Filter(""), true);
}
// Tests parsing an empty --gtest_filter flag.
@@ -5460,7 +5315,7 @@ TEST_F(InitGoogleTestTest, FilterEmpty) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Filter(""));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Filter(""), false);
}
// Tests parsing a non-empty --gtest_filter flag.
@@ -5476,7 +5331,7 @@ TEST_F(InitGoogleTestTest, FilterNonEmpty) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Filter("abc"));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Filter("abc"), false);
}
// Tests parsing --gtest_break_on_failure.
@@ -5492,7 +5347,7 @@ TEST_F(InitGoogleTestTest, BreakOnFailureWithoutValue) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::BreakOnFailure(true));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::BreakOnFailure(true), false);
}
// Tests parsing --gtest_break_on_failure=0.
@@ -5508,7 +5363,7 @@ TEST_F(InitGoogleTestTest, BreakOnFailureFalse_0) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::BreakOnFailure(false));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::BreakOnFailure(false), false);
}
// Tests parsing --gtest_break_on_failure=f.
@@ -5524,7 +5379,7 @@ TEST_F(InitGoogleTestTest, BreakOnFailureFalse_f) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::BreakOnFailure(false));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::BreakOnFailure(false), false);
}
// Tests parsing --gtest_break_on_failure=F.
@@ -5540,7 +5395,7 @@ TEST_F(InitGoogleTestTest, BreakOnFailureFalse_F) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::BreakOnFailure(false));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::BreakOnFailure(false), false);
}
// Tests parsing a --gtest_break_on_failure flag that has a "true"
@@ -5557,7 +5412,7 @@ TEST_F(InitGoogleTestTest, BreakOnFailureTrue) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::BreakOnFailure(true));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::BreakOnFailure(true), false);
}
// Tests parsing --gtest_catch_exceptions.
@@ -5573,7 +5428,7 @@ TEST_F(InitGoogleTestTest, CatchExceptions) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::CatchExceptions(true));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::CatchExceptions(true), false);
}
// Tests parsing --gtest_death_test_use_fork.
@@ -5589,7 +5444,7 @@ TEST_F(InitGoogleTestTest, DeathTestUseFork) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::DeathTestUseFork(true));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::DeathTestUseFork(true), false);
}
// Tests having the same flag twice with different values. The
@@ -5607,7 +5462,7 @@ TEST_F(InitGoogleTestTest, DuplicatedFlags) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Filter("b"));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Filter("b"), false);
}
// Tests having an unrecognized flag on the command line.
@@ -5629,7 +5484,7 @@ TEST_F(InitGoogleTestTest, UnrecognizedFlag) {
Flags flags;
flags.break_on_failure = true;
flags.filter = "b";
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, flags);
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, flags, false);
}
// Tests having a --gtest_list_tests flag
@@ -5645,7 +5500,7 @@ TEST_F(InitGoogleTestTest, ListTestsFlag) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(true));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(true), false);
}
// Tests having a --gtest_list_tests flag with a "true" value
@@ -5661,7 +5516,7 @@ TEST_F(InitGoogleTestTest, ListTestsTrue) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(true));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(true), false);
}
// Tests having a --gtest_list_tests flag with a "false" value
@@ -5677,7 +5532,7 @@ TEST_F(InitGoogleTestTest, ListTestsFalse) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(false));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(false), false);
}
// Tests parsing --gtest_list_tests=f.
@@ -5693,7 +5548,7 @@ TEST_F(InitGoogleTestTest, ListTestsFalse_f) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(false));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(false), false);
}
// Tests parsing --gtest_list_tests=F.
@@ -5709,7 +5564,7 @@ TEST_F(InitGoogleTestTest, ListTestsFalse_F) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(false));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(false), false);
}
// Tests parsing --gtest_output (invalid).
@@ -5726,7 +5581,7 @@ TEST_F(InitGoogleTestTest, OutputEmpty) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags());
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags(), true);
}
// Tests parsing --gtest_output=xml
@@ -5742,7 +5597,7 @@ TEST_F(InitGoogleTestTest, OutputXml) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Output("xml"));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Output("xml"), false);
}
// Tests parsing --gtest_output=xml:file
@@ -5758,7 +5613,7 @@ TEST_F(InitGoogleTestTest, OutputXmlFile) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Output("xml:file"));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Output("xml:file"), false);
}
// Tests parsing --gtest_output=xml:directory/path/
@@ -5774,7 +5629,8 @@ TEST_F(InitGoogleTestTest, OutputXmlDirectory) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Output("xml:directory/path/"));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2,
+ Flags::Output("xml:directory/path/"), false);
}
// Tests having a --gtest_print_time flag
@@ -5790,7 +5646,7 @@ TEST_F(InitGoogleTestTest, PrintTimeFlag) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::PrintTime(true));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::PrintTime(true), false);
}
// Tests having a --gtest_print_time flag with a "true" value
@@ -5806,7 +5662,7 @@ TEST_F(InitGoogleTestTest, PrintTimeTrue) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::PrintTime(true));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::PrintTime(true), false);
}
// Tests having a --gtest_print_time flag with a "false" value
@@ -5822,7 +5678,7 @@ TEST_F(InitGoogleTestTest, PrintTimeFalse) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::PrintTime(false));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::PrintTime(false), false);
}
// Tests parsing --gtest_print_time=f.
@@ -5838,7 +5694,7 @@ TEST_F(InitGoogleTestTest, PrintTimeFalse_f) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::PrintTime(false));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::PrintTime(false), false);
}
// Tests parsing --gtest_print_time=F.
@@ -5854,7 +5710,7 @@ TEST_F(InitGoogleTestTest, PrintTimeFalse_F) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::PrintTime(false));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::PrintTime(false), false);
}
// Tests parsing --gtest_random_seed=number
@@ -5870,7 +5726,7 @@ TEST_F(InitGoogleTestTest, RandomSeed) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::RandomSeed(1000));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::RandomSeed(1000), false);
}
// Tests parsing --gtest_repeat=number
@@ -5886,7 +5742,7 @@ TEST_F(InitGoogleTestTest, Repeat) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Repeat(1000));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Repeat(1000), false);
}
// Tests having a --gtest_also_run_disabled_tests flag
@@ -5902,7 +5758,8 @@ TEST_F(InitGoogleTestTest, AlsoRunDisabledTestsFlag) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::AlsoRunDisabledTests(true));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2,
+ Flags::AlsoRunDisabledTests(true), false);
}
// Tests having a --gtest_also_run_disabled_tests flag with a "true" value
@@ -5918,7 +5775,8 @@ TEST_F(InitGoogleTestTest, AlsoRunDisabledTestsTrue) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::AlsoRunDisabledTests(true));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2,
+ Flags::AlsoRunDisabledTests(true), false);
}
// Tests having a --gtest_also_run_disabled_tests flag with a "false" value
@@ -5934,7 +5792,8 @@ TEST_F(InitGoogleTestTest, AlsoRunDisabledTestsFalse) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::AlsoRunDisabledTests(false));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2,
+ Flags::AlsoRunDisabledTests(false), false);
}
// Tests parsing --gtest_shuffle.
@@ -5950,7 +5809,7 @@ TEST_F(InitGoogleTestTest, ShuffleWithoutValue) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Shuffle(true));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Shuffle(true), false);
}
// Tests parsing --gtest_shuffle=0.
@@ -5966,7 +5825,7 @@ TEST_F(InitGoogleTestTest, ShuffleFalse_0) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Shuffle(false));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Shuffle(false), false);
}
// Tests parsing a --gtest_shuffle flag that has a "true"
@@ -5983,7 +5842,23 @@ TEST_F(InitGoogleTestTest, ShuffleTrue) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Shuffle(true));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Shuffle(true), false);
+}
+
+// Tests parsing --gtest_stack_trace_depth=number.
+TEST_F(InitGoogleTestTest, StackTraceDepth) {
+ const char* argv[] = {
+ "foo.exe",
+ "--gtest_stack_trace_depth=5",
+ NULL
+ };
+
+ const char* argv2[] = {
+ "foo.exe",
+ NULL
+ };
+
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::StackTraceDepth(5), false);
}
// Tests parsing --gtest_throw_on_failure.
@@ -5999,7 +5874,7 @@ TEST_F(InitGoogleTestTest, ThrowOnFailureWithoutValue) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ThrowOnFailure(true));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ThrowOnFailure(true), false);
}
// Tests parsing --gtest_throw_on_failure=0.
@@ -6015,7 +5890,7 @@ TEST_F(InitGoogleTestTest, ThrowOnFailureFalse_0) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ThrowOnFailure(false));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ThrowOnFailure(false), false);
}
// Tests parsing a --gtest_throw_on_failure flag that has a "true"
@@ -6032,7 +5907,7 @@ TEST_F(InitGoogleTestTest, ThrowOnFailureTrue) {
NULL
};
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ThrowOnFailure(true));
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ThrowOnFailure(true), false);
}
#if GTEST_OS_WINDOWS
@@ -6058,7 +5933,7 @@ TEST_F(InitGoogleTestTest, WideStrings) {
expected_flags.filter = "Foo*";
expected_flags.list_tests = true;
- GTEST_TEST_PARSING_FLAGS_(argv, argv2, expected_flags);
+ GTEST_TEST_PARSING_FLAGS_(argv, argv2, expected_flags, false);
}
#endif // GTEST_OS_WINDOWS
@@ -6420,23 +6295,6 @@ TEST(StaticAssertTypeEqTest, CompilesForEqualTypes) {
StaticAssertTypeEq<int*, IntAlias*>();
}
-TEST(ThreadLocalTest, DefaultConstructor) {
- ThreadLocal<int> t1;
- EXPECT_EQ(0, t1.get());
-
- ThreadLocal<void*> t2;
- EXPECT_TRUE(t2.get() == NULL);
-}
-
-TEST(ThreadLocalTest, Init) {
- ThreadLocal<int> t1(123);
- EXPECT_EQ(123, t1.get());
-
- int i = 0;
- ThreadLocal<int*> t2(&i);
- EXPECT_EQ(&i, t2.get());
-}
-
TEST(GetCurrentOsStackTraceExceptTopTest, ReturnsTheStackTrace) {
testing::UnitTest* const unit_test = testing::UnitTest::GetInstance();
@@ -6608,26 +6466,26 @@ TEST(TestEventListenersTest, Append) {
// order.
class SequenceTestingListener : public EmptyTestEventListener {
public:
- SequenceTestingListener(Vector<String>* vector, const char* id)
+ SequenceTestingListener(std::vector<String>* vector, const char* id)
: vector_(vector), id_(id) {}
protected:
virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {
- vector_->PushBack(GetEventDescription("OnTestProgramStart"));
+ vector_->push_back(GetEventDescription("OnTestProgramStart"));
}
virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {
- vector_->PushBack(GetEventDescription("OnTestProgramEnd"));
+ vector_->push_back(GetEventDescription("OnTestProgramEnd"));
}
virtual void OnTestIterationStart(const UnitTest& /*unit_test*/,
int /*iteration*/) {
- vector_->PushBack(GetEventDescription("OnTestIterationStart"));
+ vector_->push_back(GetEventDescription("OnTestIterationStart"));
}
virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/,
int /*iteration*/) {
- vector_->PushBack(GetEventDescription("OnTestIterationEnd"));
+ vector_->push_back(GetEventDescription("OnTestIterationEnd"));
}
private:
@@ -6637,14 +6495,14 @@ class SequenceTestingListener : public EmptyTestEventListener {
return message.GetString();
}
- Vector<String>* vector_;
+ std::vector<String>* vector_;
const char* const id_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(SequenceTestingListener);
};
TEST(EventListenerTest, AppendKeepsOrder) {
- Vector<String> vec;
+ std::vector<String> vec;
TestEventListeners listeners;
listeners.Append(new SequenceTestingListener(&vec, "1st"));
listeners.Append(new SequenceTestingListener(&vec, "2nd"));
@@ -6652,34 +6510,34 @@ TEST(EventListenerTest, AppendKeepsOrder) {
TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart(
*UnitTest::GetInstance());
- ASSERT_EQ(3, vec.size());
- EXPECT_STREQ("1st.OnTestProgramStart", vec.GetElement(0).c_str());
- EXPECT_STREQ("2nd.OnTestProgramStart", vec.GetElement(1).c_str());
- EXPECT_STREQ("3rd.OnTestProgramStart", vec.GetElement(2).c_str());
+ ASSERT_EQ(3U, vec.size());
+ EXPECT_STREQ("1st.OnTestProgramStart", vec[0].c_str());
+ EXPECT_STREQ("2nd.OnTestProgramStart", vec[1].c_str());
+ EXPECT_STREQ("3rd.OnTestProgramStart", vec[2].c_str());
- vec.Clear();
+ vec.clear();
TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramEnd(
*UnitTest::GetInstance());
- ASSERT_EQ(3, vec.size());
- EXPECT_STREQ("3rd.OnTestProgramEnd", vec.GetElement(0).c_str());
- EXPECT_STREQ("2nd.OnTestProgramEnd", vec.GetElement(1).c_str());
- EXPECT_STREQ("1st.OnTestProgramEnd", vec.GetElement(2).c_str());
+ ASSERT_EQ(3U, vec.size());
+ EXPECT_STREQ("3rd.OnTestProgramEnd", vec[0].c_str());
+ EXPECT_STREQ("2nd.OnTestProgramEnd", vec[1].c_str());
+ EXPECT_STREQ("1st.OnTestProgramEnd", vec[2].c_str());
- vec.Clear();
+ vec.clear();
TestEventListenersAccessor::GetRepeater(&listeners)->OnTestIterationStart(
*UnitTest::GetInstance(), 0);
- ASSERT_EQ(3, vec.size());
- EXPECT_STREQ("1st.OnTestIterationStart", vec.GetElement(0).c_str());
- EXPECT_STREQ("2nd.OnTestIterationStart", vec.GetElement(1).c_str());
- EXPECT_STREQ("3rd.OnTestIterationStart", vec.GetElement(2).c_str());
+ ASSERT_EQ(3U, vec.size());
+ EXPECT_STREQ("1st.OnTestIterationStart", vec[0].c_str());
+ EXPECT_STREQ("2nd.OnTestIterationStart", vec[1].c_str());
+ EXPECT_STREQ("3rd.OnTestIterationStart", vec[2].c_str());
- vec.Clear();
+ vec.clear();
TestEventListenersAccessor::GetRepeater(&listeners)->OnTestIterationEnd(
*UnitTest::GetInstance(), 0);
- ASSERT_EQ(3, vec.size());
- EXPECT_STREQ("3rd.OnTestIterationEnd", vec.GetElement(0).c_str());
- EXPECT_STREQ("2nd.OnTestIterationEnd", vec.GetElement(1).c_str());
- EXPECT_STREQ("1st.OnTestIterationEnd", vec.GetElement(2).c_str());
+ ASSERT_EQ(3U, vec.size());
+ EXPECT_STREQ("3rd.OnTestIterationEnd", vec[0].c_str());
+ EXPECT_STREQ("2nd.OnTestIterationEnd", vec[1].c_str());
+ EXPECT_STREQ("1st.OnTestIterationEnd", vec[2].c_str());
}
// Tests that a listener removed from a TestEventListeners list stops receiving
@@ -6845,3 +6703,16 @@ TEST(EventListenerTest, RemovingDefaultXmlGeneratorWorks) {
EXPECT_FALSE(is_destroyed);
delete listener;
}
+
+// Sanity tests to ensure that the alternative, verbose spellings of
+// some of the macros work. We don't test them thoroughly as that
+// would be quite involved. Since their implementations are
+// straightforward, and they are rarely used, we'll just rely on the
+// users to tell us when they are broken.
+GTEST_TEST(AlternativeNameTest, Works) { // GTEST_TEST is the same as TEST.
+ GTEST_SUCCEED() << "OK"; // GTEST_SUCCEED is the same as SUCCEED.
+
+ // GTEST_FAIL is the same as FAIL.
+ EXPECT_FATAL_FAILURE(GTEST_FAIL() << "An expected failure",
+ "An expected failure");
+}
diff --git a/gtest/test/production.h b/gtest/test/production.h
index 59970da..8f16fff 100644
--- a/gtest/test/production.h
+++ b/gtest/test/production.h
@@ -48,7 +48,7 @@ class PrivateCode {
int x() const { return x_; }
private:
- void set_x(int x) { x_ = x; }
+ void set_x(int an_x) { x_ = an_x; }
int x_;
};
diff --git a/gtest/test/run_tests_util.py b/gtest/test/run_tests_util.py
new file mode 100755
index 0000000..9e57931
--- /dev/null
+++ b/gtest/test/run_tests_util.py
@@ -0,0 +1,466 @@
+# Copyright 2008 Google Inc. All Rights Reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Provides facilities for running SCons-built Google Test/Mock tests."""
+
+
+import optparse
+import os
+import re
+import sets
+import sys
+
+try:
+ # The subprocess module is the preferred way to invoke subprocesses, but it may
+ # not be available on MacOS X 10.4.
+ # Suppresses the 'Import not at the top of the file' lint complaint.
+ # pylint: disable-msg=C6204
+ import subprocess
+except ImportError:
+ subprocess = None
+
+HELP_MSG = """Runs the specified tests for %(proj)s.
+
+SYNOPSIS
+ run_tests.py [OPTION]... [BUILD_DIR]... [TEST]...
+
+DESCRIPTION
+ Runs the specified tests (either binary or Python), and prints a
+ summary of the results. BUILD_DIRs will be used to search for the
+ binaries. If no TESTs are specified, all binary tests found in
+ BUILD_DIRs and all Python tests found in the directory test/ (in the
+ %(proj)s root) are run.
+
+ TEST is the name of either a binary or a Python test. A binary test is
+ an executable file named *_test or *_unittest (with the .exe
+ extension on Windows). A Python test is a script named *_test.py or
+ *_unittest.py.
+
+OPTIONS
+ -h, --help
+ Print this help message.
+ -c CONFIGURATIONS
+ Specify build directories via build configurations.
+ CONFIGURATIONS is either a comma-separated list of build
+ configurations or 'all'. Each configuration is equivalent to
+ adding 'scons/build/<configuration>/%(proj)s/scons' to BUILD_DIRs.
+ Specifying -c=all is equivalent to providing all directories
+ listed in the KNOWN BUILD DIRECTORIES section below.
+ -a
+ Equivalent to -c=all.
+ -b
+ Equivalent to -c=all with the exception that the script will not
+ fail if some of the KNOWN BUILD DIRECTORIES do not exist; the
+ script will simply not run the tests there. 'b' stands for
+ 'built directories'.
+
+RETURN VALUE
+ Returns 0 if all tests are successful; otherwise returns 1.
+
+EXAMPLES
+ run_tests.py
+ Runs all tests for the default build configuration.
+ run_tests.py -a
+ Runs all tests with binaries in KNOWN BUILD DIRECTORIES.
+ run_tests.py -b
+ Runs all tests in KNOWN BUILD DIRECTORIES that have been
+ built.
+ run_tests.py foo/
+ Runs all tests in the foo/ directory and all Python tests in
+ the directory test. The Python tests are instructed to look
+ for binaries in foo/.
+ run_tests.py bar_test.exe test/baz_test.exe foo/ bar/
+ Runs foo/bar_test.exe, bar/bar_test.exe, foo/baz_test.exe, and
+ bar/baz_test.exe.
+ run_tests.py foo bar test/foo_test.py
+ Runs test/foo_test.py twice, instructing it to look for its
+ test binaries in the directories foo and bar,
+ respectively.
+
+KNOWN BUILD DIRECTORIES
+ run_tests.py knows about directories where the SCons build script
+ deposits its products. These are the directories where run_tests.py
+ will be looking for its binaries. Currently, %(proj)s's SConstruct file
+ defines them as follows (the default build directory is the first one
+ listed in each group):
+ On Windows:
+ <%(proj)s root>/scons/build/win-dbg8/%(proj)s/scons/
+ <%(proj)s root>/scons/build/win-opt8/%(proj)s/scons/
+ On Mac:
+ <%(proj)s root>/scons/build/mac-dbg/%(proj)s/scons/
+ <%(proj)s root>/scons/build/mac-opt/%(proj)s/scons/
+ On other platforms:
+ <%(proj)s root>/scons/build/dbg/%(proj)s/scons/
+ <%(proj)s root>/scons/build/opt/%(proj)s/scons/"""
+
+IS_WINDOWS = os.name == 'nt'
+IS_MAC = os.name == 'posix' and os.uname()[0] == 'Darwin'
+IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
+
+# Definition of CONFIGS must match that of the build directory names in the
+# SConstruct script. The first list item is the default build configuration.
+if IS_WINDOWS:
+ CONFIGS = ('win-dbg8', 'win-opt8')
+elif IS_MAC:
+ CONFIGS = ('mac-dbg', 'mac-opt')
+else:
+ CONFIGS = ('dbg', 'opt')
+
+if IS_WINDOWS or IS_CYGWIN:
+ PYTHON_TEST_REGEX = re.compile(r'_(unit)?test\.py$', re.IGNORECASE)
+ BINARY_TEST_REGEX = re.compile(r'_(unit)?test(\.exe)?$', re.IGNORECASE)
+ BINARY_TEST_SEARCH_REGEX = re.compile(r'_(unit)?test\.exe$', re.IGNORECASE)
+else:
+ PYTHON_TEST_REGEX = re.compile(r'_(unit)?test\.py$')
+ BINARY_TEST_REGEX = re.compile(r'_(unit)?test$')
+ BINARY_TEST_SEARCH_REGEX = BINARY_TEST_REGEX
+
+
+def _GetGtestBuildDir(injected_os, script_dir, config):
+ """Calculates path to the Google Test SCons build directory."""
+
+ return injected_os.path.normpath(injected_os.path.join(script_dir,
+ 'scons/build',
+ config,
+ 'gtest/scons'))
+
+
+def _GetConfigFromBuildDir(build_dir):
+ """Extracts the configuration name from the build directory."""
+
+ # We don't want to depend on build_dir containing the correct path
+ # separators.
+ m = re.match(r'.*[\\/]([^\\/]+)[\\/][^\\/]+[\\/]scons[\\/]?$', build_dir)
+ if m:
+ return m.group(1)
+ else:
+ print >>sys.stderr, ('%s is an invalid build directory that does not '
+ 'correspond to any configuration.' % (build_dir,))
+ return ''
+
+
+# All paths in this script are either absolute or relative to the current
+# working directory, unless otherwise specified.
+class TestRunner(object):
+ """Provides facilities for running Python and binary tests for Google Test."""
+
+ def __init__(self,
+ script_dir,
+ build_dir_var_name='GTEST_BUILD_DIR',
+ injected_os=os,
+ injected_subprocess=subprocess,
+ injected_build_dir_finder=_GetGtestBuildDir):
+ """Initializes a TestRunner instance.
+
+ Args:
+ script_dir: File path to the calling script.
+ build_dir_var_name: Name of the env variable used to pass the
+ build directory path to the invoked
+ tests.
+ injected_os: standard os module or a mock/stub for
+ testing.
+ injected_subprocess: standard subprocess module or a mock/stub
+ for testing.
+ injected_build_dir_finder: function that determines the path to
+ the build directory.
+ """
+
+ self.os = injected_os
+ self.subprocess = injected_subprocess
+ self.build_dir_finder = injected_build_dir_finder
+ self.build_dir_var_name = build_dir_var_name
+ self.script_dir = script_dir
+
+ def _GetBuildDirForConfig(self, config):
+ """Returns the build directory for a given configuration."""
+
+ return self.build_dir_finder(self.os, self.script_dir, config)
+
+ def _Run(self, args):
+ """Runs the executable with given args (args[0] is the executable name).
+
+ Args:
+ args: Command line arguments for the process.
+
+ Returns:
+ Process's exit code if it exits normally, or -signal if the process is
+ killed by a signal.
+ """
+
+ if self.subprocess:
+ return self.subprocess.Popen(args).wait()
+ else:
+ return self.os.spawnv(self.os.P_WAIT, args[0], args)
+
+ def _RunBinaryTest(self, test):
+ """Runs the binary test given its path.
+
+ Args:
+ test: Path to the test binary.
+
+ Returns:
+ Process's exit code if it exits normally, or -signal if the process is
+ killed by a signal.
+ """
+
+ return self._Run([test])
+
+ def _RunPythonTest(self, test, build_dir):
+ """Runs the Python test script with the specified build directory.
+
+ Args:
+ test: Path to the test's Python script.
+ build_dir: Path to the directory where the test binary is to be found.
+
+ Returns:
+ Process's exit code if it exits normally, or -signal if the process is
+ killed by a signal.
+ """
+
+ old_build_dir = self.os.environ.get(self.build_dir_var_name)
+
+ try:
+ self.os.environ[self.build_dir_var_name] = build_dir
+
+ # If this script is run on a Windows machine that has no association
+ # between the .py extension and a python interpreter, simply passing
+ # the script name into subprocess.Popen/os.spawn will not work.
+ print 'Running %s . . .' % (test,)
+ return self._Run([sys.executable, test])
+
+ finally:
+ if old_build_dir is None:
+ del self.os.environ[self.build_dir_var_name]
+ else:
+ self.os.environ[self.build_dir_var_name] = old_build_dir
+
+ def _FindFilesByRegex(self, directory, regex):
+ """Returns files in a directory whose names match a regular expression.
+
+ Args:
+ directory: Path to the directory to search for files.
+ regex: Regular expression to filter file names.
+
+ Returns:
+ The list of the paths to the files in the directory.
+ """
+
+ return [self.os.path.join(directory, file_name)
+ for file_name in self.os.listdir(directory)
+ if re.search(regex, file_name)]
+
+ # TODO(vladl@google.com): Implement parsing of scons/SConscript to run all
+ # tests defined there when no tests are specified.
+ # TODO(vladl@google.com): Update the docstring after the code is changed to
+ # try to test all builds defined in scons/SConscript.
+ def GetTestsToRun(self,
+ args,
+ named_configurations,
+ built_configurations,
+ available_configurations=CONFIGS,
+ python_tests_to_skip=None):
+ """Determines what tests should be run.
+
+ Args:
+ args: The list of non-option arguments from the command line.
+ named_configurations: The list of configurations specified via -c or -a.
+ built_configurations: True if -b has been specified.
+ available_configurations: a list of configurations available on the
+ current platform, injectable for testing.
+ python_tests_to_skip: a collection of (configuration, python test name)s
+ that need to be skipped.
+
+ Returns:
+ A tuple with 2 elements: the list of Python tests to run and the list of
+ binary tests to run.
+ """
+
+ if named_configurations == 'all':
+ named_configurations = ','.join(available_configurations)
+
+ normalized_args = [self.os.path.normpath(arg) for arg in args]
+
+ # A final list of build directories which will be searched for the test
+ # binaries. First, add directories specified directly on the command
+ # line.
+ build_dirs = filter(self.os.path.isdir, normalized_args)
+
+ # Adds build directories specified via their build configurations using
+ # the -c or -a options.
+ if named_configurations:
+ build_dirs += [self._GetBuildDirForConfig(config)
+ for config in named_configurations.split(',')]
+
+ # Adds KNOWN BUILD DIRECTORIES if -b is specified.
+ if built_configurations:
+ build_dirs += [self._GetBuildDirForConfig(config)
+ for config in available_configurations
+ if self.os.path.isdir(self._GetBuildDirForConfig(config))]
+
+ # If no directories were specified either via -a, -b, -c, or directly, use
+ # the default configuration.
+ elif not build_dirs:
+ build_dirs = [self._GetBuildDirForConfig(available_configurations[0])]
+
+ # Makes sure there are no duplications.
+ build_dirs = sets.Set(build_dirs)
+
+ errors_found = False
+ listed_python_tests = [] # All Python tests listed on the command line.
+ listed_binary_tests = [] # All binary tests listed on the command line.
+
+ test_dir = self.os.path.normpath(self.os.path.join(self.script_dir, 'test'))
+
+ # Sifts through non-directory arguments fishing for any Python or binary
+ # tests and detecting errors.
+ for argument in sets.Set(normalized_args) - build_dirs:
+ if re.search(PYTHON_TEST_REGEX, argument):
+ python_path = self.os.path.join(test_dir,
+ self.os.path.basename(argument))
+ if self.os.path.isfile(python_path):
+ listed_python_tests.append(python_path)
+ else:
+ sys.stderr.write('Unable to find Python test %s' % argument)
+ errors_found = True
+ elif re.search(BINARY_TEST_REGEX, argument):
+ # This script also accepts binary test names prefixed with test/ for
+ # the convenience of typing them (can use path completions in the
+ # shell). Strips test/ prefix from the binary test names.
+ listed_binary_tests.append(self.os.path.basename(argument))
+ else:
+ sys.stderr.write('%s is neither test nor build directory' % argument)
+ errors_found = True
+
+ if errors_found:
+ return None
+
+ user_has_listed_tests = listed_python_tests or listed_binary_tests
+
+ if user_has_listed_tests:
+ selected_python_tests = listed_python_tests
+ else:
+ selected_python_tests = self._FindFilesByRegex(test_dir,
+ PYTHON_TEST_REGEX)
+
+ # TODO(vladl@google.com): skip unbuilt Python tests when -b is specified.
+ python_test_pairs = []
+ for directory in build_dirs:
+ for test in selected_python_tests:
+ config = _GetConfigFromBuildDir(directory)
+ file_name = os.path.basename(test)
+ if python_tests_to_skip and (config, file_name) in python_tests_to_skip:
+ print ('NOTE: %s is skipped for configuration %s, as it does not '
+ 'work there.' % (file_name, config))
+ else:
+ python_test_pairs.append((directory, test))
+
+ binary_test_pairs = []
+ for directory in build_dirs:
+ if user_has_listed_tests:
+ binary_test_pairs.extend(
+ [(directory, self.os.path.join(directory, test))
+ for test in listed_binary_tests])
+ else:
+ tests = self._FindFilesByRegex(directory, BINARY_TEST_SEARCH_REGEX)
+ binary_test_pairs.extend([(directory, test) for test in tests])
+
+ return (python_test_pairs, binary_test_pairs)
+
+ def RunTests(self, python_tests, binary_tests):
+ """Runs Python and binary tests and reports results to the standard output.
+
+ Args:
+ python_tests: List of Python tests to run in the form of tuples
+ (build directory, Python test script).
+ binary_tests: List of binary tests to run in the form of tuples
+ (build directory, binary file).
+
+ Returns:
+ The exit code the program should pass into sys.exit().
+ """
+
+ if python_tests or binary_tests:
+ results = []
+ for directory, test in python_tests:
+ results.append((directory,
+ test,
+ self._RunPythonTest(test, directory) == 0))
+ for directory, test in binary_tests:
+ results.append((directory,
+ self.os.path.basename(test),
+ self._RunBinaryTest(test) == 0))
+
+ failed = [(directory, test)
+ for (directory, test, success) in results
+ if not success]
+ print
+ print '%d tests run.' % len(results)
+ if failed:
+ print 'The following %d tests failed:' % len(failed)
+ for (directory, test) in failed:
+ print '%s in %s' % (test, directory)
+ return 1
+ else:
+ print 'All tests passed!'
+ else: # No tests defined
+ print 'Nothing to test - no tests specified!'
+
+ return 0
+
+
+def ParseArgs(project_name, argv=None, help_callback=None):
+ """Parses the options run_tests.py uses."""
+
+ # Suppresses lint warning on unused arguments. These arguments are
+ # required by optparse, even though they are unused.
+ # pylint: disable-msg=W0613
+ def PrintHelp(option, opt, value, parser):
+ print HELP_MSG % {'proj': project_name}
+ sys.exit(1)
+
+ parser = optparse.OptionParser()
+ parser.add_option('-c',
+ action='store',
+ dest='configurations',
+ default=None)
+ parser.add_option('-a',
+ action='store_const',
+ dest='configurations',
+ default=None,
+ const='all')
+ parser.add_option('-b',
+ action='store_const',
+ dest='built_configurations',
+ default=False,
+ const=True)
+ # Replaces the built-in help with ours.
+ parser.remove_option('-h')
+ parser.add_option('-h', '--help',
+ action='callback',
+ callback=help_callback or PrintHelp)
+ return parser.parse_args(argv)
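The module above is only the library half of the runner: ParseArgs handles the command line, TestRunner.GetTestsToRun resolves build directories and test names, and TestRunner.RunTests executes and summarizes them. The sketch below is a hypothetical run_tests.py driver (not part of this patch) showing one way these pieces might be wired together; the 'gtest' project name and the script-directory handling are illustrative assumptions only.

#!/usr/bin/env python
# Hypothetical driver sketch; not part of this patch.
import os
import sys

import run_tests_util


def _Main():
  # Parses -a/-b/-c plus positional BUILD_DIR and TEST arguments.
  options, args = run_tests_util.ParseArgs('gtest')
  test_runner = run_tests_util.TestRunner(
      script_dir=os.path.dirname(__file__) or '.')
  # GetTestsToRun returns None on bad arguments, otherwise a
  # (python_test_pairs, binary_test_pairs) tuple.
  tests = test_runner.GetTestsToRun(args,
                                    options.configurations,
                                    options.built_configurations)
  if not tests:
    sys.exit(1)
  sys.exit(test_runner.RunTests(tests[0], tests[1]))


if __name__ == '__main__':
  _Main()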
diff --git a/gtest/test/run_tests_util_test.py b/gtest/test/run_tests_util_test.py
new file mode 100755
index 0000000..9c55726
--- /dev/null
+++ b/gtest/test/run_tests_util_test.py
@@ -0,0 +1,676 @@
+#!/usr/bin/env python
+#
+# Copyright 2009 Google Inc. All Rights Reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Tests for run_tests_util.py test runner script."""
+
+__author__ = 'vladl@google.com (Vlad Losev)'
+
+import os
+import re
+import sets
+import unittest
+
+import run_tests_util
+
+
+GTEST_DBG_DIR = 'scons/build/dbg/gtest/scons'
+GTEST_OPT_DIR = 'scons/build/opt/gtest/scons'
+GTEST_OTHER_DIR = 'scons/build/other/gtest/scons'
+
+
+def AddExeExtension(path):
+ """Appends .exe to the path on Windows or Cygwin."""
+
+ if run_tests_util.IS_WINDOWS or run_tests_util.IS_CYGWIN:
+ return path + '.exe'
+ else:
+ return path
+
+
+class FakePath(object):
+ """A fake os.path module for testing."""
+
+ def __init__(self, current_dir=os.getcwd(), known_paths=None):
+ self.current_dir = current_dir
+ self.tree = {}
+ self.path_separator = os.sep
+
+ # known_paths contains either absolute or relative paths. Relative paths
+ # are absolutized with self.current_dir.
+ if known_paths:
+ self._AddPaths(known_paths)
+
+ def _AddPath(self, path):
+ ends_with_slash = path.endswith('/')
+ path = self.abspath(path)
+ if ends_with_slash:
+ path += self.path_separator
+ name_list = path.split(self.path_separator)
+ tree = self.tree
+ for name in name_list[:-1]:
+ if not name:
+ continue
+ if name in tree:
+ tree = tree[name]
+ else:
+ tree[name] = {}
+ tree = tree[name]
+
+ name = name_list[-1]
+ if name:
+ if name in tree:
+ assert tree[name] == 1
+ else:
+ tree[name] = 1
+
+ def _AddPaths(self, paths):
+ for path in paths:
+ self._AddPath(path)
+
+ def PathElement(self, path):
+ """Returns an internal representation of directory tree entry for path."""
+ tree = self.tree
+ name_list = self.abspath(path).split(self.path_separator)
+ for name in name_list:
+ if not name:
+ continue
+ tree = tree.get(name, None)
+ if tree is None:
+ break
+
+ return tree
+
+ # Silences pylint warning about using standard names.
+ # pylint: disable-msg=C6409
+ def normpath(self, path):
+ return os.path.normpath(path)
+
+ def abspath(self, path):
+ return self.normpath(os.path.join(self.current_dir, path))
+
+ def isfile(self, path):
+ return self.PathElement(self.abspath(path)) == 1
+
+ def isdir(self, path):
+ return type(self.PathElement(self.abspath(path))) == type(dict())
+
+ def basename(self, path):
+ return os.path.basename(path)
+
+ def dirname(self, path):
+ return os.path.dirname(path)
+
+ def join(self, *kargs):
+ return os.path.join(*kargs)
+
+
+class FakeOs(object):
+ """A fake os module for testing."""
+ P_WAIT = os.P_WAIT
+
+ def __init__(self, fake_path_module):
+ self.path = fake_path_module
+
+ # Some methods/attributes are delegated to the real os module.
+ self.environ = os.environ
+
+ # pylint: disable-msg=C6409
+ def listdir(self, path):
+ assert self.path.isdir(path)
+ return self.path.PathElement(path).iterkeys()
+
+ def spawnv(self, wait, executable, *kargs):
+ assert wait == FakeOs.P_WAIT
+ return self.spawn_impl(executable, kargs)
+
+
+class GetTestsToRunTest(unittest.TestCase):
+ """Exercises TestRunner.GetTestsToRun."""
+
+ def NormalizeGetTestsToRunResults(self, results):
+ """Normalizes path data returned from GetTestsToRun for comparison."""
+
+ def NormalizePythonTestPair(pair):
+ """Normalizes path data in the (directory, python_script) pair."""
+
+ return (os.path.normpath(pair[0]), os.path.normpath(pair[1]))
+
+ def NormalizeBinaryTestPair(pair):
+ """Normalizes path data in the (directory, binary_executable) pair."""
+
+ directory, executable = map(os.path.normpath, pair)
+
+ # On Windows and Cygwin, the test file names have the .exe extension, but
+ # they can be invoked either by name or by name+extension. Our test must
+ # accommodate both situations.
+ if run_tests_util.IS_WINDOWS or run_tests_util.IS_CYGWIN:
+ executable = re.sub(r'\.exe$', '', executable)
+ return (directory, executable)
+
+ python_tests = sets.Set(map(NormalizePythonTestPair, results[0]))
+ binary_tests = sets.Set(map(NormalizeBinaryTestPair, results[1]))
+ return (python_tests, binary_tests)
+
+ def AssertResultsEqual(self, results, expected):
+ """Asserts results returned by GetTestsToRun equal to expected results."""
+
+ self.assertEqual(self.NormalizeGetTestsToRunResults(results),
+ self.NormalizeGetTestsToRunResults(expected),
+ 'Incorrect set of tests returned:\n%s\nexpected:\n%s' %
+ (results, expected))
+
+ def setUp(self):
+ self.fake_os = FakeOs(FakePath(
+ current_dir=os.path.abspath(os.path.dirname(run_tests_util.__file__)),
+ known_paths=[AddExeExtension(GTEST_DBG_DIR + '/gtest_unittest'),
+ AddExeExtension(GTEST_OPT_DIR + '/gtest_unittest'),
+ 'test/gtest_color_test.py']))
+ self.fake_configurations = ['dbg', 'opt']
+ self.test_runner = run_tests_util.TestRunner(script_dir='.',
+ injected_os=self.fake_os,
+ injected_subprocess=None)
+
+ def testBinaryTestsOnly(self):
+ """Exercises GetTestsToRun with parameters designating binary tests only."""
+
+ # A default build.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ ['gtest_unittest'],
+ '',
+ False,
+ available_configurations=self.fake_configurations),
+ ([],
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
+
+ # An explicitly specified directory.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ [GTEST_DBG_DIR, 'gtest_unittest'],
+ '',
+ False,
+ available_configurations=self.fake_configurations),
+ ([],
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
+
+ # A particular configuration.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ ['gtest_unittest'],
+ 'other',
+ False,
+ available_configurations=self.fake_configurations),
+ ([],
+ [(GTEST_OTHER_DIR, GTEST_OTHER_DIR + '/gtest_unittest')]))
+
+ # All available configurations
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ ['gtest_unittest'],
+ 'all',
+ False,
+ available_configurations=self.fake_configurations),
+ ([],
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
+ (GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
+
+ # All built configurations (unbuilt don't cause failure).
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ ['gtest_unittest'],
+ '',
+ True,
+ available_configurations=self.fake_configurations + ['unbuilt']),
+ ([],
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
+ (GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
+
+ # A combination of an explicit directory and a configuration.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ [GTEST_DBG_DIR, 'gtest_unittest'],
+ 'opt',
+ False,
+ available_configurations=self.fake_configurations),
+ ([],
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
+ (GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
+
+ # Same test specified in an explicit directory and via a configuration.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ [GTEST_DBG_DIR, 'gtest_unittest'],
+ 'dbg',
+ False,
+ available_configurations=self.fake_configurations),
+ ([],
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
+
+ # All built configurations + explicit directory + explicit configuration.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ [GTEST_DBG_DIR, 'gtest_unittest'],
+ 'opt',
+ True,
+ available_configurations=self.fake_configurations),
+ ([],
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
+ (GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
+
+ def testPythonTestsOnly(self):
+ """Exercises GetTestsToRun with parameters designating Python tests only."""
+
+ # A default build.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ ['gtest_color_test.py'],
+ '',
+ False,
+ available_configurations=self.fake_configurations),
+ ([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
+ []))
+
+ # An explicitly specified directory.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ [GTEST_DBG_DIR, 'test/gtest_color_test.py'],
+ '',
+ False,
+ available_configurations=self.fake_configurations),
+ ([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
+ []))
+
+ # A particular configuration.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ ['gtest_color_test.py'],
+ 'other',
+ False,
+ available_configurations=self.fake_configurations),
+ ([(GTEST_OTHER_DIR, 'test/gtest_color_test.py')],
+ []))
+
+ # All available configurations
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ ['test/gtest_color_test.py'],
+ 'all',
+ False,
+ available_configurations=self.fake_configurations),
+ ([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
+ (GTEST_OPT_DIR, 'test/gtest_color_test.py')],
+ []))
+
+ # All built configurations (unbuilt don't cause failure).
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ ['gtest_color_test.py'],
+ '',
+ True,
+ available_configurations=self.fake_configurations + ['unbuilt']),
+ ([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
+ (GTEST_OPT_DIR, 'test/gtest_color_test.py')],
+ []))
+
+ # A combination of an explicit directory and a configuration.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ [GTEST_DBG_DIR, 'gtest_color_test.py'],
+ 'opt',
+ False,
+ available_configurations=self.fake_configurations),
+ ([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
+ (GTEST_OPT_DIR, 'test/gtest_color_test.py')],
+ []))
+
+ # Same test specified in an explicit directory and via a configuration.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ [GTEST_DBG_DIR, 'gtest_color_test.py'],
+ 'dbg',
+ False,
+ available_configurations=self.fake_configurations),
+ ([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
+ []))
+
+ # All built configurations + explicit directory + explicit configuration.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ [GTEST_DBG_DIR, 'gtest_color_test.py'],
+ 'opt',
+ True,
+ available_configurations=self.fake_configurations),
+ ([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
+ (GTEST_OPT_DIR, 'test/gtest_color_test.py')],
+ []))
+
+ def testCombinationOfBinaryAndPythonTests(self):
+ """Exercises GetTestsToRun with mixed binary/Python tests."""
+
+ # Use only default configuration for this test.
+
+ # Neither binary nor Python tests are specified so find all.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ [],
+ '',
+ False,
+ available_configurations=self.fake_configurations),
+ ([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
+
+ # Specifying both binary and Python tests.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ ['gtest_unittest', 'gtest_color_test.py'],
+ '',
+ False,
+ available_configurations=self.fake_configurations),
+ ([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
+
+ # Specifying binary tests suppresses Python tests.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ ['gtest_unittest'],
+ '',
+ False,
+ available_configurations=self.fake_configurations),
+ ([],
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
+
+ # Specifying Python tests suppresses binary tests.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ ['gtest_color_test.py'],
+ '',
+ False,
+ available_configurations=self.fake_configurations),
+ ([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
+ []))
+
+ def testIgnoresNonTestFiles(self):
+ """Verifies that GetTestsToRun ignores non-test files in the filesystem."""
+
+ self.fake_os = FakeOs(FakePath(
+ current_dir=os.path.abspath(os.path.dirname(run_tests_util.__file__)),
+ known_paths=[AddExeExtension(GTEST_DBG_DIR + '/gtest_nontest'),
+ 'test/']))
+ self.test_runner = run_tests_util.TestRunner(script_dir='.',
+ injected_os=self.fake_os,
+ injected_subprocess=None)
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ [],
+ '',
+ True,
+ available_configurations=self.fake_configurations),
+ ([], []))
+
+ def testWorksFromDifferentDir(self):
+ """Exercises GetTestsToRun from a directory different from run_test.py's."""
+
+ # Here we simulate a test script in directory /d/ called from the
+ # directory /a/b/c/.
+ self.fake_os = FakeOs(FakePath(
+ current_dir=os.path.abspath('/a/b/c'),
+ known_paths=[
+ '/a/b/c/',
+ AddExeExtension('/d/' + GTEST_DBG_DIR + '/gtest_unittest'),
+ AddExeExtension('/d/' + GTEST_OPT_DIR + '/gtest_unittest'),
+ '/d/test/gtest_color_test.py']))
+ self.fake_configurations = ['dbg', 'opt']
+ self.test_runner = run_tests_util.TestRunner(script_dir='/d/',
+ injected_os=self.fake_os,
+ injected_subprocess=None)
+ # A binary test.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ ['gtest_unittest'],
+ '',
+ False,
+ available_configurations=self.fake_configurations),
+ ([],
+ [('/d/' + GTEST_DBG_DIR, '/d/' + GTEST_DBG_DIR + '/gtest_unittest')]))
+
+ # A Python test.
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ ['gtest_color_test.py'],
+ '',
+ False,
+ available_configurations=self.fake_configurations),
+ ([('/d/' + GTEST_DBG_DIR, '/d/test/gtest_color_test.py')], []))
+
+ def testNonTestBinary(self):
+ """Exercises GetTestsToRun with a non-test parameter."""
+
+ self.assert_(
+ not self.test_runner.GetTestsToRun(
+ ['gtest_unittest_not_really'],
+ '',
+ False,
+ available_configurations=self.fake_configurations))
+
+ def testNonExistingPythonTest(self):
+ """Exercises GetTestsToRun with a non-existent Python test parameter."""
+
+ self.assert_(
+ not self.test_runner.GetTestsToRun(
+ ['nonexistent_test.py'],
+ '',
+ False,
+ available_configurations=self.fake_configurations))
+
+ if run_tests_util.IS_WINDOWS or run_tests_util.IS_CYGWIN:
+
+ def testDoesNotPickNonExeFilesOnWindows(self):
+ """Verifies that GetTestsToRun does not find _test files on Windows."""
+
+ self.fake_os = FakeOs(FakePath(
+ current_dir=os.path.abspath(os.path.dirname(run_tests_util.__file__)),
+ known_paths=['/d/' + GTEST_DBG_DIR + '/gtest_test', 'test/']))
+ self.test_runner = run_tests_util.TestRunner(script_dir='.',
+ injected_os=self.fake_os,
+ injected_subprocess=None)
+ self.AssertResultsEqual(
+ self.test_runner.GetTestsToRun(
+ [],
+ '',
+ True,
+ available_configurations=self.fake_configurations),
+ ([], []))
+
+
+class RunTestsTest(unittest.TestCase):
+ """Exercises TestRunner.RunTests."""
+
+ def SpawnSuccess(self, unused_executable, unused_argv):
+ """Fakes test success by returning 0 as an exit code."""
+
+ self.num_spawn_calls += 1
+ return 0
+
+ def SpawnFailure(self, unused_executable, unused_argv):
+ """Fakes test success by returning 1 as an exit code."""
+
+ self.num_spawn_calls += 1
+ return 1
+
+ def setUp(self):
+ self.fake_os = FakeOs(FakePath(
+ current_dir=os.path.abspath(os.path.dirname(run_tests_util.__file__)),
+ known_paths=[
+ AddExeExtension(GTEST_DBG_DIR + '/gtest_unittest'),
+ AddExeExtension(GTEST_OPT_DIR + '/gtest_unittest'),
+ 'test/gtest_color_test.py']))
+ self.fake_configurations = ['dbg', 'opt']
+ self.test_runner = run_tests_util.TestRunner(
+ script_dir=os.path.dirname(__file__) or '.',
+ injected_os=self.fake_os,
+ injected_subprocess=None)
+ self.num_spawn_calls = 0 # A number of calls to spawn.
+
+ def testRunPythonTestSuccess(self):
+ """Exercises RunTests to handle a Python test success."""
+
+ self.fake_os.spawn_impl = self.SpawnSuccess
+ self.assertEqual(
+ self.test_runner.RunTests(
+ [(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
+ []),
+ 0)
+ self.assertEqual(self.num_spawn_calls, 1)
+
+ def testRunBinaryTestSuccess(self):
+ """Exercises RunTests to handle a binary test success."""
+
+ self.fake_os.spawn_impl = self.SpawnSuccess
+ self.assertEqual(
+ self.test_runner.RunTests(
+ [],
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
+ 0)
+ self.assertEqual(self.num_spawn_calls, 1)
+
+ def testRunPythonTestFailure(self):
+ """Exercises RunTests to handle a Python test failure."""
+
+ self.fake_os.spawn_impl = self.SpawnFailure
+ self.assertEqual(
+ self.test_runner.RunTests(
+ [(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
+ []),
+ 1)
+ self.assertEqual(self.num_spawn_calls, 1)
+
+ def testRunBinaryTestFailure(self):
+ """Exercises RunTests to handle a binary test failure."""
+
+ self.fake_os.spawn_impl = self.SpawnFailure
+ self.assertEqual(
+ self.test_runner.RunTests(
+ [],
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
+ 1)
+ self.assertEqual(self.num_spawn_calls, 1)
+
+ def testCombinedTestSuccess(self):
+ """Exercises RunTests to handle a success of both Python and binary test."""
+
+ self.fake_os.spawn_impl = self.SpawnSuccess
+ self.assertEqual(
+ self.test_runner.RunTests(
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')],
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
+ 0)
+ self.assertEqual(self.num_spawn_calls, 2)
+
+ def testCombinedTestSuccessAndFailure(self):
+ """Exercises RunTests to handle a success of both Python and binary test."""
+
+ def SpawnImpl(executable, argv):
+ self.num_spawn_calls += 1
+ # Simulates failure of a Python test and success of a binary test.
+ if '.py' in executable or '.py' in argv[0]:
+ return 1
+ else:
+ return 0
+
+ self.fake_os.spawn_impl = SpawnImpl
+ self.assertEqual(
+ self.test_runner.RunTests(
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')],
+ [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
+ 0)
+ self.assertEqual(self.num_spawn_calls, 2)
+
+
+class ParseArgsTest(unittest.TestCase):
+ """Exercises ParseArgs."""
+
+ def testNoOptions(self):
+ options, args = run_tests_util.ParseArgs('gtest', argv=['script.py'])
+ self.assertEqual(args, ['script.py'])
+ self.assert_(options.configurations is None)
+ self.assertFalse(options.built_configurations)
+
+ def testOptionC(self):
+ options, args = run_tests_util.ParseArgs(
+ 'gtest', argv=['script.py', '-c', 'dbg'])
+ self.assertEqual(args, ['script.py'])
+ self.assertEqual(options.configurations, 'dbg')
+ self.assertFalse(options.built_configurations)
+
+ def testOptionA(self):
+ options, args = run_tests_util.ParseArgs('gtest', argv=['script.py', '-a'])
+ self.assertEqual(args, ['script.py'])
+ self.assertEqual(options.configurations, 'all')
+ self.assertFalse(options.built_configurations)
+
+ def testOptionB(self):
+ options, args = run_tests_util.ParseArgs('gtest', argv=['script.py', '-b'])
+ self.assertEqual(args, ['script.py'])
+ self.assert_(options.configurations is None)
+ self.assertTrue(options.built_configurations)
+
+ def testOptionCAndOptionB(self):
+ options, args = run_tests_util.ParseArgs(
+ 'gtest', argv=['script.py', '-c', 'dbg', '-b'])
+ self.assertEqual(args, ['script.py'])
+ self.assertEqual(options.configurations, 'dbg')
+ self.assertTrue(options.built_configurations)
+
+ def testOptionH(self):
+ help_called = [False]
+
+ # Suppresses lint warning on unused arguments. These arguments are
+ # required by optparse, even though they are unused.
+ # pylint: disable-msg=W0613
+ def VerifyHelp(option, opt, value, parser):
+ help_called[0] = True
+
+ # Verifies that -h causes the help callback to be called.
+ help_called[0] = False
+ _, args = run_tests_util.ParseArgs(
+ 'gtest', argv=['script.py', '-h'], help_callback=VerifyHelp)
+ self.assertEqual(args, ['script.py'])
+ self.assertTrue(help_called[0])
+
+ # Verifies that --help causes the help callback to be called.
+ help_called[0] = False
+ _, args = run_tests_util.ParseArgs(
+ 'gtest', argv=['script.py', '--help'], help_callback=VerifyHelp)
+ self.assertEqual(args, ['script.py'])
+ self.assertTrue(help_called[0])
+
+
+if __name__ == '__main__':
+ unittest.main()
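As a footnote on the injection pattern used above: because TestRunner takes its os and subprocess modules as constructor arguments, GetTestsToRun can be exercised against a purely fake directory tree. The snippet below is a minimal sketch, not part of the patch, reusing the FakeOs/FakePath helpers from this test module and assuming both modules are importable from the current directory.

import os

import run_tests_util
import run_tests_util_test

# Build a fake filesystem containing only the dbg gtest_unittest binary.
fake_os = run_tests_util_test.FakeOs(run_tests_util_test.FakePath(
    current_dir=os.getcwd(),
    known_paths=[run_tests_util_test.AddExeExtension(
        run_tests_util_test.GTEST_DBG_DIR + '/gtest_unittest')]))
runner = run_tests_util.TestRunner(script_dir='.',
                                   injected_os=fake_os,
                                   injected_subprocess=None)
# On a POSIX host this prints roughly:
# ([], [('scons/build/dbg/gtest/scons',
#        'scons/build/dbg/gtest/scons/gtest_unittest')])
print runner.GetTestsToRun(['gtest_unittest'], '', False,
                           available_configurations=['dbg', 'opt'])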