forked from learning-process/parallel_programming_course
-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy path: runners.cpp
More file actions
142 lines (116 loc) · 4.12 KB
/
runners.cpp
File metadata and controls
142 lines (116 loc) · 4.12 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
#include "runners/include/runners.hpp"
#include <gtest/gtest.h>
#include <mpi.h>
#include <chrono>
#include <cstdint>
#include <cstdlib>
#include <format>
#include <iostream>
#include <memory>
#include <random>
#include <stdexcept>
#include <string>
#include "oneapi/tbb/global_control.h"
#include "util/include/util.hpp"
namespace ppc::runners {
void UnreadMessagesDetector::OnTestEnd(const ::testing::TestInfo & /*test_info*/) {
  // After every test, synchronize all ranks and verify that no rank left an
  // unconsumed MPI message in the queue. Any leftover message means the test
  // mismatched its sends/receives, so the whole job is aborted.
  int my_rank = -1;
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  MPI_Barrier(MPI_COMM_WORLD);

  MPI_Status probe_status;
  int has_pending = -1;
  const int probe_rc = MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &has_pending, &probe_status);
  if (probe_rc != MPI_SUCCESS) {
    // The probe itself failed: report and abort with the MPI error code.
    std::cerr << std::format("[ PROCESS {} ] [ ERROR ] MPI_Iprobe failed with code {}", my_rank, probe_rc) << '\n';
    MPI_Abort(MPI_COMM_WORLD, probe_rc);
  }
  if (has_pending != 0) {
    // A message is still queued: report its origin and abort the run.
    std::cerr
        << std::format(
               "[ PROCESS {} ] [ FAILED ] MPI message queue has an unread message from process {} with tag {}",
               my_rank, probe_status.MPI_SOURCE, probe_status.MPI_TAG)
        << '\n';
    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
  }
  MPI_Barrier(MPI_COMM_WORLD);
}
void WorkerTestFailurePrinter::OnTestEnd(const ::testing::TestInfo &test_info) {
  // Worker ranks stay silent on success; only failing tests are forwarded to
  // the wrapped printer, prefixed with this process's MPI rank.
  const bool test_failed = !test_info.result()->Passed();
  if (test_failed) {
    PrintProcessRank();
    base_->OnTestEnd(test_info);
  }
}
void WorkerTestFailurePrinter::OnTestPartResult(const ::testing::TestPartResult &test_part_result) {
  // Only genuine failures are interesting; passed and skipped parts are dropped.
  const bool is_failure = !test_part_result.passed() && !test_part_result.skipped();
  if (!is_failure) {
    return;
  }
  // Tag the failure output with the MPI rank, then delegate to the wrapped printer.
  PrintProcessRank();
  base_->OnTestPartResult(test_part_result);
}
void WorkerTestFailurePrinter::PrintProcessRank() {
  // Emit a "[ PROCESS n ]" prefix so interleaved worker output is attributable.
  int my_rank = -1;
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  std::cerr << std::format(" [ PROCESS {} ] ", my_rank);
}
namespace {
/// Executes the registered GoogleTest suite and converts a deferred
/// destructor failure (recorded in ppc::util::DestructorFailureFlag during
/// the run) into an exception, since by that point gtest can no longer
/// report it as a test failure.
int RunAllTests() {
  const int gtest_status = RUN_ALL_TESTS();
  if (ppc::util::DestructorFailureFlag::Get()) {
    throw std::runtime_error(
        std::format("[ ERROR ] Destructor failed with code {}", ppc::util::DestructorFailureFlag::Get()));
  }
  return gtest_status;
}
}  // namespace
/// Initializes MPI and GoogleTest, runs the full test suite on every rank,
/// and shuts the MPI runtime down again.
///
/// @param argc Command-line argument count (forwarded to MPI_Init and gtest).
/// @param argv Command-line arguments; passing "--print-workers" as the first
///             argument keeps the default result printer on worker ranks.
/// @return GoogleTest exit status, or an MPI error code if init/finalize fails.
/// @throws std::runtime_error when a task destructor reported a failure
///         (propagated from RunAllTests after MPI has been finalized).
int Init(int argc, char **argv) {
  const int init_res = MPI_Init(&argc, &argv);
  if (init_res != MPI_SUCCESS) {
    std::cerr << std::format("[ ERROR ] MPI_Init failed with code {}", init_res) << '\n';
    MPI_Abort(MPI_COMM_WORLD, init_res);
    return init_res;
  }

  // Limit the number of threads in TBB for the lifetime of the run.
  tbb::global_control control(tbb::global_control::max_allowed_parallelism, ppc::util::GetNumThreads());

  ::testing::InitGoogleTest(&argc, argv);

  // Ensure consistent GoogleTest shuffle order across all MPI ranks:
  // rank 0 chooses a seed and broadcasts it to everyone.
  unsigned int seed = 0;
  int rank_for_seed = -1;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank_for_seed);
  if (rank_for_seed == 0) {
    try {
      seed = std::random_device{}();
    } catch (...) {
      seed = 0;  // std::random_device may throw when no entropy source exists
    }
    if (seed == 0) {
      // Fallback: derive a non-zero seed from the monotonic clock.
      const auto now = static_cast<std::uint64_t>(std::chrono::steady_clock::now().time_since_epoch().count());
      seed = static_cast<unsigned int>(((now & 0x7fffffffULL) | 1ULL));
    }
  }
  MPI_Bcast(&seed, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
  ::testing::GTEST_FLAG(random_seed) = static_cast<int>(seed);

  auto &listeners = ::testing::UnitTest::GetInstance()->listeners();
  int rank = -1;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  if (rank != 0 && (argc < 2 || argv[1] != std::string("--print-workers"))) {
    // Worker ranks replace the default printer so only failures are shown,
    // each prefixed with the originating rank. gtest owns appended listeners.
    auto *listener = listeners.Release(listeners.default_result_printer());
    listeners.Append(new WorkerTestFailurePrinter(std::shared_ptr<::testing::TestEventListener>(listener)));
  }
  listeners.Append(new UnreadMessagesDetector());

  // BUG FIX: RunAllTests() may throw (destructor-failure path). Previously the
  // exception skipped MPI_Finalize entirely, leaving the MPI runtime alive.
  // Finalize first, then propagate the same exception to the caller.
  int status = 0;
  try {
    status = RunAllTests();
  } catch (...) {
    MPI_Finalize();
    throw;
  }

  const int finalize_res = MPI_Finalize();
  if (finalize_res != MPI_SUCCESS) {
    std::cerr << std::format("[ ERROR ] MPI_Finalize failed with code {}", finalize_res) << '\n';
    MPI_Abort(MPI_COMM_WORLD, finalize_res);
    return finalize_res;
  }
  return status;
}
/// Runs the test suite without any MPI setup: caps TBB parallelism,
/// initializes GoogleTest, and executes all registered tests.
int SimpleInit(int argc, char **argv) {
  // Limit the number of threads in TBB for the lifetime of the run.
  tbb::global_control tbb_limit(tbb::global_control::max_allowed_parallelism, ppc::util::GetNumThreads());
  testing::InitGoogleTest(&argc, argv);
  const int status = RunAllTests();
  return status;
}
} // namespace ppc::runners