// aca-tasks/task1/task1-auto.cpp
#include <fmt/format.h>

#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <fstream>
#include <string>
#include <thread>
#include <vector>

#include "include/mergesort_mt.h"
/*
Create a simple sorting application that uses the mergesort algorithm to sort a
large collection (e.g., 10^7) of 32-bit integers. The input data and output
results should be stored in files, and the I/O operations should be considered
a sequential part of the application. Mergesort is an algorithm that is
considered appropriate for parallel execution, although it cannot be equally
divided between an arbitrary number of processors, as Amdahl's and
Gustafson-Barsis's laws require.
Assuming that this equal division is possible, estimate α, i.e., the part of
the program that can be parallelized, by using a profiler like gprof or
valgrind to measure the duration of the sort's execution relative to the
overall execution time. Use this number to estimate the predicted speedup for
your program. Does α depend on the size of the input? If it does, how should
you modify your predictions and their graphical illustration?
*/
template<typename T>
auto parse_file(std::ifstream &stream, std::vector<T> &vec) -> void {
std::string buf;
T convbuf;
while (std::getline(stream, buf)) {
convbuf = static_cast<T>(std::stoul(buf));
vec.emplace_back(std::move(convbuf));
}
}
2023-11-01 09:29:35 +00:00
auto main(int argc, char *argv[]) -> int {
2023-11-01 09:00:36 +00:00
try {
2023-11-06 20:51:13 +00:00
const auto path = "dataset.dat";
std::ifstream file(path, std::ios_base::in);
2023-11-01 09:16:53 +00:00
if (!file.is_open()) {
2023-11-06 20:51:13 +00:00
fmt::print("\nError opening file");
2023-11-01 09:16:53 +00:00
return -1;
}
2023-10-31 13:48:54 +00:00
2023-11-06 20:51:13 +00:00
fmt::print("\nOpened file {} sucessfully", path);
2023-11-01 09:16:53 +00:00
std::vector<int32_t> dataset;
2023-10-31 13:48:54 +00:00
2023-11-01 09:16:53 +00:00
parse_file(file, dataset);
2023-11-06 20:51:13 +00:00
fmt::print("\nRead {} values from {}", dataset.size(), path);
2023-10-31 13:48:54 +00:00
auto dataset_par = dataset;
auto dataset_seq = dataset;
2023-10-31 13:48:54 +00:00
auto t1 = std::chrono::high_resolution_clock::now();
2023-11-07 10:45:49 +00:00
MergeSorterMT<int32_t> msst([](int32_t a, int32_t b) {
2023-11-07 10:51:34 +00:00
return (a > b);
2023-11-03 08:03:36 +00:00
}, 0);
2023-11-07 10:45:49 +00:00
msst.sort(dataset_seq);
2023-11-01 09:16:53 +00:00
auto t2 = std::chrono::high_resolution_clock::now();
2023-11-06 20:36:10 +00:00
auto t_seq = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1);
2023-11-06 20:51:13 +00:00
fmt::print("\nSorted {} entries within {} ms in sequential", dataset_seq.size(), t_seq.count());
2023-11-01 11:32:52 +00:00
2023-11-06 20:36:10 +00:00
const int threads = std::thread::hardware_concurrency();
2023-11-07 11:16:08 +00:00
const int max_depth = std::sqrt(threads);
2023-11-06 20:36:10 +00:00
t1 = std::chrono::high_resolution_clock::now();
2023-11-07 10:51:34 +00:00
MergeSorterMT<int32_t> msmt([](int32_t a, int32_t b) {
return (a > b);
2023-11-06 20:36:10 +00:00
}, max_depth);
2023-11-07 10:45:49 +00:00
msmt.sort(dataset_par);
t2 = std::chrono::high_resolution_clock::now();
2023-11-06 20:36:10 +00:00
auto t_par = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1);
2023-11-07 10:51:34 +00:00
fmt::print("\nSorted {} entries within {} ms in parallel on a system having {} threads and a recursion depth of {}"
"\nresulting in a total count of {} threads",
2023-11-07 11:06:28 +00:00
dataset_seq.size(), t_par.count(), threads, max_depth, std::pow(2, max_depth));
auto eq = (dataset_seq == dataset_par);
2023-11-07 10:51:34 +00:00
fmt::print("\nCheck whether sorted arrays are equal: {}", (eq) ? "Equal" : "not equal");
2023-11-06 20:36:10 +00:00
2023-11-06 20:51:13 +00:00
fmt::print("\n\n------------Summary------------");
fmt::print("\nt_seq = {: > 5.2f} ms", static_cast<float>(t_seq.count()));
fmt::print("\nt_par = {: > 5.2f} ms", static_cast<float>(t_par.count()));
2023-11-07 10:51:34 +00:00
fmt::print("\nspeedup = {: > 5.2f}", (1.0 * t_seq / t_par));
2023-11-06 20:51:13 +00:00
fmt::print("\nDelta_t = {: > 5.2f} ms", static_cast<float>(t_seq.count() - t_par.count()));
fmt::print("\n-------------------------------");
std::ofstream ofile("dataset.out.dat", std::ios_base::out);
2023-11-07 10:51:34 +00:00
if (!ofile.is_open()) {
2023-11-06 20:51:13 +00:00
fmt::print("\nError writing to file");
return -1;
}
2023-11-07 10:51:34 +00:00
for (auto &element: dataset_seq) {
ofile << std::to_string(element) << '\n';
}
file.close();
ofile.flush();
ofile.close();
2023-11-06 20:51:13 +00:00
fmt::print("\nWritten to output file");
2023-10-31 20:11:31 +00:00
2023-11-01 09:00:36 +00:00
return 0;
2023-10-31 20:11:31 +00:00
2023-11-03 10:41:58 +00:00
} catch (std::exception &e) {
2023-11-06 20:51:13 +00:00
fmt::print("\nError occured: {}", e.what());
2023-11-01 09:00:36 +00:00
return -1;
2023-10-31 20:11:31 +00:00
}
2023-10-31 13:48:54 +00:00
}