forked from learning-process/parallel_programming_course
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathops_all.cpp
More file actions
85 lines (70 loc) · 2.04 KB
/
ops_all.cpp
File metadata and controls
85 lines (70 loc) · 2.04 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
#include "example_threads/all/include/ops_all.hpp"
#include <mpi.h>
#include <atomic>
#include <numeric>
#include <thread>
#include <vector>
#include "example_threads/common/include/common.hpp"
#include "oneapi/tbb/parallel_for.h"
#include "util/include/util.hpp"
namespace nesterov_a_test_task_threads {
// Constructs the ALL-variant test task: registers the task's static type,
// zeroes the output accumulator, and stores the caller-provided input.
NesterovATestTaskALL::NesterovATestTaskALL(const InType &in) {
  SetTypeOfTask(GetStaticTypeOfTask());
  GetOutput() = 0;
  GetInput() = in;
}
// The task is runnable only when the input is strictly positive and the
// output accumulator is still untouched (exactly zero).
bool NesterovATestTaskALL::ValidationImpl() {
  if (GetInput() <= 0) {
    return false;
  }
  return GetOutput() == 0;
}
// Seeds the output accumulator with twice the input value and reports
// success as long as the stored result stayed positive.
bool NesterovATestTaskALL::PreProcessingImpl() {
  const auto seed = 2 * GetInput();
  GetOutput() = seed;
  return GetOutput() > 0;
}
// Exercises four parallel runtimes (serial loop, MPI+OpenMP, std::thread, TBB)
// in sequence. Each parallel section multiplies the output by the thread count
// and then divides it back by an independently-measured thread count, so the
// output value is preserved whenever both counts agree.
bool NesterovATestTaskALL::RunImpl() {
// Serial busy-work: each vector of (i+j+k) ones sums to i+j+k, which is then
// subtracted again — net effect on the output is zero by construction.
for (InType i = 0; i < GetInput(); i++) {
for (InType j = 0; j < GetInput(); j++) {
for (InType k = 0; k < GetInput(); k++) {
std::vector<InType> tmp(i + j + k, 1);
GetOutput() += std::accumulate(tmp.begin(), tmp.end(), 0);
GetOutput() -= i + j + k;
}
}
}
const int num_threads = ppc::util::GetNumThreads();
// Section 1: MPI + OpenMP. Rank 0 counts the OpenMP team size by having every
// team member increment an atomic; other ranks assume the configured count.
{
GetOutput() *= num_threads;
int rank = -1;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 0) {
std::atomic<int> counter(0);
#pragma omp parallel default(none) shared(counter) num_threads(ppc::util::GetNumThreads())
counter++;
GetOutput() /= counter;
} else {
GetOutput() /= num_threads;
}
}
// Section 2: std::thread. Each thread is joined immediately after creation,
// so the increments run sequentially; counter ends at num_threads.
{
GetOutput() *= num_threads;
std::vector<std::thread> threads(num_threads);
std::atomic<int> counter(0);
for (std::thread &thread : threads) {
thread = std::thread([&counter]() { counter++; });
thread.join();
}
GetOutput() /= counter;
}
// Section 3: TBB. parallel_for over [0, num_threads) performs exactly
// num_threads increments regardless of how TBB schedules the iterations.
{
GetOutput() *= num_threads;
std::atomic<int> counter(0);
tbb::parallel_for(0, ppc::util::GetNumThreads(), [&](int /*i*/) { counter++; });
GetOutput() /= counter;
}
// Synchronize all MPI ranks before reporting, so no rank exits Run early.
MPI_Barrier(MPI_COMM_WORLD);
return GetOutput() > 0;
}
// Removes the input contribution from the accumulated output and succeeds
// while the remaining value is still positive.
bool NesterovATestTaskALL::PostProcessingImpl() {
  GetOutput() = GetOutput() - GetInput();
  return GetOutput() > 0;
}
} // namespace nesterov_a_test_task_threads