Compare commits

6 commits: ed193c4fad ... edde2e75fb

Commits in this range (SHA1):
edde2e75fb
af34983a46
c6cdc7ea6e
a818b9af66
87dc5c0f49
b8a3255cf5
.gitignore (vendored), 2 changed lines
@@ -1,3 +1,3 @@
-cmake-build-*
+.out
 .idea
 task1/dataset.dat
CMakeLists.txt (top level)
@@ -1,21 +1,28 @@
-cmake_minimum_required(VERSION 3.0)
+cmake_minimum_required(VERSION 3.25)
 project(aca-tasks)
 
+# Set C++ standard
 set(CMAKE_CXX_STANDARD 20)
-set(CMAKE_INCLUDE_CURRENT_DIR ON)
 
+# Set install directory
 set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/installed)
 
+# Set compiler flags for optimization on Release build
 if (${CMAKE_BUILD_TYPE} STREQUAL "Release")
+    # Compiler options
     add_compile_options(
             -Wall
             -Wpedantic
             -O3
-            -g3
-    )
+            -g3)
+    # Defines for some libraries
     add_compile_definitions(
             NDEBUG)
 endif ()
 
+# Include general purpose libraries to build
 add_subdirectory(third-party/fmt)
 
+# Include CMakeLists files from subdirs for specific tasks
 add_subdirectory(task1)
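Note that the warning and optimization flags (-Wall, -Wpedantic, -O3, -g3) and the NDEBUG define above are only applied when CMAKE_BUILD_TYPE is "Release"; a configure run without that cache variable builds without them. The preset added below sets the variable explicitly.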
CMakePresets.json (new file, 20 lines)
@@ -0,0 +1,20 @@
{
  "version": 6,
  "cmakeMinimumRequired": {
    "major": 3,
    "minor": 25,
    "patch": 0
  },
  "configurePresets": [
    {
      "name": "task1@release",
      "displayName": "Task1 Release build",
      "description": "Builds the targets of task1 as release",
      "generator": "Ninja",
      "binaryDir": "${sourceDir}/.out/task1-release",
      "cacheVariables": {
        "CMAKE_BUILD_TYPE": "Release"
      }
    }
  ]
}
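The file defines a configure preset only, so the intended workflow (assuming CMake 3.25+ and Ninja are installed) is presumably cmake --preset task1@release followed by cmake --build .out/task1-release from the source directory; binaryDir places the build tree under .out/, which matches the new .gitignore entry.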
task1/CMakeLists.txt
@@ -1,29 +1,56 @@
-find_package(Python3 COMPONENTS Interpreter REQUIRED)
+# Find packages necessary for this application
+find_package(Qt6 COMPONENTS Core REQUIRED)
 find_package(fmt)
 
+# Search for python to generate the test dataset
+find_package(Python3 COMPONENTS Interpreter REQUIRED)
+
+# Generate random dataset
 add_custom_command(OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/dataset.dat
         DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/dataset-gen.py
         COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/dataset-gen.py
         WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
         COMMENT "Generating random dataset")
+# Copy random dataset to binary dir
 add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/dataset.dat
         DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/dataset.dat
         COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_SOURCE_DIR}/dataset.dat ${CMAKE_CURRENT_BINARY_DIR}/dataset.dat
         COMMENT "Copying dataset")
 
-add_custom_target(task1_7_dataset
+add_custom_target(task1-dataset
         DEPENDS
         ${CMAKE_CURRENT_SOURCE_DIR}/dataset.dat
         ${CMAKE_CURRENT_BINARY_DIR}/dataset.dat)
 
-add_executable(task1_7 main.cpp
-        mergesort_mt.h)
-
-target_link_libraries(task1_7 PRIVATE
-        fmt::fmt)
+# Add task1 automated target (Automatically loads generated dataset
+add_executable(task1-auto)
+add_dependencies(task1-auto task1-dataset)
+target_sources(task1-auto PRIVATE
+        src/task1-auto.cpp)
+target_include_directories(task1-auto PRIVATE
+        ${CMAKE_CURRENT_SOURCE_DIR}/include)
+target_link_libraries(task1-auto PRIVATE
+        fmt::fmt)
 
+# Add task1 target with an C++ written random data generator
+add_executable(task1-randgen)
+target_sources(task1-randgen PRIVATE
+        src/task1-randgen.cpp)
+target_link_libraries(task1-randgen PRIVATE
+        Qt6::Core)
 
-install(TARGETS task1_7 DESTINATION bin)
-install(IMPORTED_RUNTIME_ARTIFACTS task1_7 DESTINATION bin)
+# Add task1 target with an C++ written QT-based mergesort
+add_executable(task1-sorter)
+target_sources(task1-sorter PRIVATE
+        src/task1-sorter.cpp)
+target_include_directories(task1-sorter PRIVATE
+        ${CMAKE_CURRENT_SOURCE_DIR}/include)
+target_link_libraries(task1-sorter PRIVATE
+        Qt6::Core)
 
-add_dependencies(task1_7 task1_7_dataset)
+# Define install options
+install(TARGETS task1-auto DESTINATION bin)
+install(TARGETS task1-randgen DESTINATION bin)
+install(IMPORTED_RUNTIME_ARTIFACTS task1-auto DESTINATION lib)
+install(IMPORTED_RUNTIME_ARTIFACTS task1-randgen DESTINATION lib)
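With this layout, building task1-auto also triggers dataset generation: add_dependencies ties it to the task1-dataset custom target, whose custom commands run dataset-gen.py and copy the result into the build tree. The dataset alone can presumably be produced with cmake --build <builddir> --target task1-dataset.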
mergesort_mt.h
@@ -4,22 +4,30 @@
 #include <mutex>
 #include <functional>
 
+// General purpose mergesorter with multi threading support by Robin Dietzel <robin.dietzel@iem.thm.de>
 template<typename T>
 class MergeSorterMT {
 
 public:
     template<typename C>
     MergeSorterMT(C cmp, int max_depth) : cmp(cmp), max_depth(max_depth) {
+        // Assert that cmp is a function that returns bool and takes two arguments of type T
         static_assert(std::is_same<std::invoke_result_t<C, T, T>, bool>(), "C must be a function that returns a bool");
     }
 
+    // Start sorting process
     auto sort(std::vector<T> &data) -> void {
+        // Create span: like a 'view' on the vector -> no unnecessary copies are made when subdividing sorting problem
         std::span<T> sortable(data);
         split(sortable, 0, max_depth);
     }
 
 private:
+    // Merge function that merges left & right span into the output span
+    // No exclusive access on output is necessary (e.g. via mutex) because all parallel threads work on different parts of output
     auto merge(std::span<T> &output, std::span<T> left, std::span<T> right) -> void {
+        // Create buffer, here we need a temporary container where we copy values to, because left and right are a view on parts
+        // of output
         std::vector<T> buf;
         buf.reserve(left.size() + right.size());
 
@@ -27,6 +35,7 @@ private:
         auto r = right.begin();
         auto o = buf.begin();
 
+        // Insert from pre sorted half's
         while (l < left.end() && r < right.end()) {
             if (cmp(*l, *r)) {
                 buf.insert(o, *l);
@@ -37,52 +46,72 @@ private:
             }
             o++;
         }
 
+        // Fill up with rest of left values
         while (l < left.end()) {
             buf.insert(o, *l);
             o++;
             l++;
         }
 
+        // Fill up with rest of right values
         while (r < right.end()) {
             buf.insert(o, *r);
             o++;
             r++;
         }
 
+        // Completely move buffer to output
+        // IMPORTANT: left and right are still a view on the splitted output, that is now sorted
         std::move(buf.begin(), buf.end(), output.begin());
     }
 
+    // Splitup function
     auto split(std::span<T> &data, int depth, const int &mdepth) -> void {
 
         if (std::distance(data.begin(), data.end()) <= 1) {
+            // Quit if only one element 'insortable'
             return;
         } else if (std::distance(data.begin(), data.end()) == 2) {
+            // Swap two values dependant on size for small speedup (no call to further split must be made)
             if(cmp(data[1], data[0])) {
                 std::swap(data[0], data[1]);
                 return;
             }
         }
 
+        // Determine mid of data
         auto mid = data.begin();
         std::advance(mid, std::distance(data.begin(), data.end()) / 2);
 
+        // Generate left and right view on data (no copies are made here)
         std::span<T> left(data.begin(), mid);
         std::span<T> right(mid, data.end());
 
         if (depth < mdepth) {
+            // Create recursive split functions if maximum depth not reached
            std::thread left_thread([&]() { split(left, depth + 1, mdepth); });
            std::thread right_thread([&]() { split(right, depth + 1, mdepth); });
 
+            // Both threads must join before we could further work on the data viewed
+            // by left and right (recursively sorted by the both calls)
            left_thread.join();
            right_thread.join();
        } else {
+            // Do normal recursion in a single thread if maximum depth is reached
            split(left, depth + 1, mdepth);
            split(right, depth + 1, mdepth);
        }
 
+        // Merge left and right together before returning
        merge(data, left, right);
+        return;
    }
 
 private:
+    // Templated comparator function
    std::function<bool(T, T)> cmp;
+    // Maximum depth
    const int max_depth;
 };
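For reference, a minimal usage sketch of the MergeSorterMT class shown above (not part of this change set; it assumes the header is reachable as "mergesort_mt.h" on the include path and that the code is built as C++20 with thread support):

#include "mergesort_mt.h"

#include <cstdint>
#include <vector>

int main() {
    std::vector<int32_t> values{5, 3, 9, 1, 7};

    // The comparator must take two values of type T and return bool (enforced by the static_assert).
    // With a < b the result is sorted ascending; max_depth = 2 lets the first two split levels
    // run in their own threads, deeper levels recurse sequentially.
    MergeSorterMT<int32_t> sorter([](int32_t a, int32_t b) { return a < b; }, 2);

    sorter.sort(values);  // sorts the vector in place
    return 0;
}

task1-sorter.cpp below uses the same pattern, with max_depth derived from the detected hardware concurrency.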
Deleted file (379 lines removed): the previous merge sort header containing the algo::MergeSort_v1, MergeSort_v2 and MergeSort_mt implementations.
@@ -1,379 +0,0 @@
#pragma once

#include <vector>
#include <algorithm>
#include <thread>
#include <mutex>
#include <future>
#include <ranges>
#include <span>

namespace algo {

class MergeSort_v1 {
private:
    template<typename Iterator, typename Comparator>
    static auto
    merge(Iterator start, Iterator middle, Iterator end, Comparator cmp, Iterator output_start) -> void {
        Iterator start_m = start;
        Iterator begin = output_start;
        Iterator start2 = middle + 1;

        //merge from input until one half completes
        while (start <= middle && start2 <= end) {
            if (cmp(*start, *start2)) {
                *output_start = *start;
                start++;
            } else {
                *output_start = *start2;
                start2++;
            }
            output_start++;
        }

        //try to finish first half
        while (start <= middle) {
            *output_start = *start;
            start++;
            output_start++;
        }

        while (start2 <= end) {
            *output_start = *start2;
            start2++;
            output_start++;
        }

        const auto size = std::distance(start_m, end);
        for (auto i = 0; i <= size; i++, start_m++, begin++) {
            *start_m = *begin;
        }
    }

    template<typename Container, typename Iterator, typename Comparator>
    static auto
    ms_split(Container &output_vec, Iterator start, Iterator end, Comparator cmp, Iterator output_start) -> void {
        Iterator mid = start;
        Iterator begin = output_start;

        if (std::distance(start, end) < 1) {
            return;
        } else {
            //move mid iterator litterally to the mid
            std::advance(mid, std::distance(start, end) / 2);
            //sort the first half within an recursion
            ms_split(output_vec, start, mid, cmp, output_start);

            //move output iterator
            std::advance(output_start, std::distance(start, mid + 1));
            //sort the second half within a recursion
            ms_split(output_vec, mid + 1, end, cmp, output_start);

            //merge everything together starting from the complete beginning
            merge(start, mid, end, cmp, begin);
        }
    }

public:
    template<typename Iterator, typename Comparator>
    static auto sort(Iterator start, Iterator end, Comparator cmp) -> void {
        using valtype = typename std::iterator_traits<Iterator>::value_type;
        std::vector<valtype> temporary_dataset(std::distance(start, end));
        ms_split(temporary_dataset, start, end - 1, cmp, temporary_dataset.begin());
    }
};

class MergeSort_v2 {
private:
    template<typename Container, typename Comparator>
    static auto
    mt_merge(Container left, Container right, Comparator cmp) -> Container {
        //using Iterator = typename std::iterator_traits<Container>::value_type;

        Container output;

        auto lefti = left.begin();
        auto righti = right.begin();

        while (lefti < left.end() && righti < right.end()) {
            if (cmp(*lefti, *righti)) {
                output.emplace_back(std::move(*lefti));
                lefti++;
            } else {
                output.emplace_back(std::move(*righti));
                righti++;
            }
        }

        while (lefti < left.end()) {
            output.emplace_back(std::move(*lefti));
            lefti++;
        }
        while (righti < right.end()) {
            output.emplace_back(std::move(*righti));
            righti++;
        }

        return output;
    }

    template<typename Iterator, typename Comparator>
    static auto
    merge(Iterator start, Iterator middle, Iterator end, Comparator cmp, Iterator output_start,
          std::recursive_mutex &dataset_guard) -> void {
        Iterator start_m = start;
        Iterator begin = output_start;
        Iterator start2 = middle + 1;

        //merge from input until one half completes
        while (start <= middle && start2 <= end) {
            if (cmp(*start, *start2)) {
                *output_start = *start;
                start++;
            } else {
                *output_start = *start2;
                start2++;
            }
            output_start++;
        }

        //try to finish first half
        while (start <= middle) {
            *output_start = *start;
            start++;
            output_start++;
        }

        while (start2 <= end) {
            *output_start = *start2;
            start2++;
            output_start++;
        }

        dataset_guard.lock();
        const auto size = std::distance(start_m, end);
        for (auto i = 0; i <= size; i++, start_m++, begin++) {
            *start_m = *begin;
        }
        dataset_guard.unlock();
    }

    template<typename Container, typename Iterator, typename Comparator>
    static auto mt_split(Container &output_vec, Iterator start, Iterator end, Comparator cmp, Iterator output_start,
                         int &nthreads, std::recursive_mutex &dataset_guard, std::mutex &depth_guard) -> void {
        Iterator mid = start;
        Iterator to_start = output_start;

        if (std::distance(start, end) < 1) {
            return;
        }

        bool rem_threads;
        {
            std::lock_guard guard(depth_guard); //RAII guard
            rem_threads = nthreads > 1;
        }

        if (rem_threads) {
            {
                std::lock_guard guard(depth_guard); //RAII guard
                nthreads -= 2;
            }

            std::advance(mid, std::distance(start, end) / 2);
            std::thread t1([&]() {
                mt_split(output_vec, start, mid, cmp, output_start, nthreads, dataset_guard, depth_guard);
            });

            std::advance(output_start, std::distance(start, mid + 1));
            std::thread t2([&]() {
                mt_split(output_vec, mid + 1, end, cmp, output_start, nthreads, dataset_guard, depth_guard);
            });

            //merge everything together starting from the complete beginning
            t1.join();
            t2.join();
            std::vector<int> left;
            left.assign(start, mid);
            std::vector<int> right;
            right.assign(mid + 1, end);

            mt_merge(left, right, cmp);
        } else {
            //move mid iterator litterally to the mid
            std::advance(mid, std::distance(start, end) / 2);
            //sort the first half within an recursion
            mt_split(output_vec, start, mid, cmp, output_start, nthreads, dataset_guard, depth_guard);

            //move output iterator
            std::advance(output_start, std::distance(start, mid + 1));
            //sort the second half within a recursion
            mt_split(output_vec, mid + 1, end, cmp, output_start, nthreads, dataset_guard, depth_guard);

            std::vector<int> left;
            left.assign(start, mid);
            std::vector<int> right;
            right.assign(mid + 1, end);

            mt_merge(left, right, cmp);
        }
    }

    template<typename Container, typename Iterator, typename Comparator>
    static auto ms_split(Container &output_vec, Iterator start, Iterator end, Comparator cmp, Iterator output_start,
                         int &nthreads, std::recursive_mutex &dataset_guard, std::mutex &depth_guard) -> void {
        Iterator mid = start;
        Iterator begin = output_start;

        if (std::distance(start, end) < 1) {
            //Quit on smalles list size (one element is always sorted)
            return;
        } else {
            if (nthreads > 1) {
                depth_guard.lock();
                nthreads -= 2;
                depth_guard.unlock();
                //move mid iterator litterally to the mid
                std::advance(mid, std::distance(start, end) / 2);
                //sort the first half within an recursion

                std::thread t1([&]() {
                    ms_split(output_vec, start, mid, cmp, output_start, nthreads, dataset_guard, depth_guard);
                });

                //move output iteratoroutput_vec, start, mid, cmp, output_start
                std::advance(output_start, std::distance(start, mid + 1));
                //sort the second half within a recursion
                std::thread t2([&]() {
                    ms_split(output_vec, mid + 1, end, cmp, output_start, nthreads, dataset_guard, depth_guard);
                });

                //merge everything together starting from the complete beginning
                t1.join();
                t2.join();
                merge(start, mid, end, cmp, begin, dataset_guard);
            } else {
                //move mid iterator litterally to the mid
                std::advance(mid, std::distance(start, end) / 2);
                //sort the first half within an recursion
                ms_split(output_vec, start, mid, cmp, output_start, nthreads, dataset_guard, depth_guard);

                //move output iterator
                std::advance(output_start, std::distance(start, mid + 1));
                //sort the second half within a recursion
                ms_split(output_vec, mid + 1, end, cmp, output_start, nthreads, dataset_guard, depth_guard);

                //merge everything together starting from the complete beginning
                merge(start, mid, end, cmp, begin, dataset_guard);
            }
        }
    }

public:
    template<typename Iterator, typename Comparator>
    static auto sort(Iterator start, Iterator end, Comparator cmp, int nthreads) -> void {
        using valtype = typename std::iterator_traits<Iterator>::value_type;
        std::vector<valtype> temporary_dataset(std::distance(start, end));
        std::recursive_mutex dataset_guard;
        std::mutex depth_guard;

        mt_split(temporary_dataset, start, end - 1, cmp, temporary_dataset.begin(), nthreads, dataset_guard,
                 depth_guard);
    }
};

class MergeSort_mt {

    template<typename T, typename Comparator>
    static auto
    merge(std::vector<T> left, std::vector<T> right,
          Comparator cmp, std::mutex &mut) -> std::vector<T> {

        std::vector<T> output;
        output.reserve(left.size() + right.size());

        auto l = left.begin();
        auto r = right.begin();

        auto o = output.begin();

        while (l < left.end() && r < right.end()) {
            if (cmp(*l, *r)) {
                output.insert(o, *l);
                l++;
            } else {
                output.insert(o, *r);
                r++;
            }
            o++;
        }
        while (l < left.end()) {
            output.insert(o, *l);
            o++;
            l++;
        }
        while (r < right.end()) {
            output.insert(o, *r);
            o++;
            r++;
        }
        return output;
    }

    template<typename T, typename Comparator>
    static auto split(std::vector<T> data, Comparator cmp, int depth, int &max_depth,
                      std::mutex &mut) -> std::vector<T>{

        if (data.size() <= 1) {
            return data;
        } else if (data.size() == 2) {
            if(cmp(data[0], data[1])) {
                return std::vector<T> {data[0], data[1]};
            } else {
                return std::vector<T> {data[1], data[0]};
            }
        }

        std::vector<T> output;
        output.reserve(data.size());

        auto mid = data.begin();
        std::advance(mid, std::distance(data.begin(), data.end()) / 2);

        std::vector<T> left(data.begin(), mid);
        std::vector<T> right(mid, data.end());

        if (depth < max_depth) {
            std::thread left_thread([&]() { left = split(left, cmp, depth + 1, max_depth, mut); });
            std::thread right_thread([&]() { right = split(right, cmp, depth + 1, max_depth, mut); });

            left_thread.join();
            right_thread.join();
        } else {
            left = split(left, cmp, depth + 1, max_depth, mut);
            right = split(right, cmp, depth + 1, max_depth, mut);
        }

        return merge(left, right, cmp, mut);
    }

public:
    template<typename T, typename Comparator>
    static auto
    sort(std::vector<T> &data, Comparator cmp, int max_depth = 0) -> void {
        std::mutex local_result_lock;
        std::vector<T> output;
        output.reserve(data.size());

        output = split(data, cmp, 0, max_depth, local_result_lock);
        data.assign(output.begin(), output.end());
    }
};

}
task1 main application source (includes switched to the quoted form)
@@ -1,11 +1,11 @@
-#include <fmt/format.h>
+#include "fmt/format.h"
 #include <vector>
 #include <fstream>
 #include <string>
 #include <chrono>
 #include <cmath>
 
-#include <mergesort_mt.h>
+#include "mergesort_mt.h"
 
 /*
 Create a simple sorting application that uses the mergesort algorithm to sort a
@@ -32,7 +32,6 @@ auto parse_file(std::ifstream &stream, std::vector<T> &vec) -> void {
         convbuf = static_cast<T>(std::stoul(buf));
         vec.emplace_back(std::move(convbuf));
     }
-
 }
 
 auto main(int argc, char *argv[]) -> int {
task1/src/task1-randgen.cpp (new file, 64 lines)
@@ -0,0 +1,64 @@
#include <QRandomGenerator>
#include <QCommandLineParser>
#include <QCoreApplication>
#include <QFile>
#include <QFileInfo>

#include <cmath>
#include <ranges>


auto main(int argc, char *argv[]) -> int{
    QCoreApplication app(argc, argv);
    QCoreApplication::setApplicationName("Random dataset generator");
    QCoreApplication::setApplicationVersion("1.0");

    QCommandLineParser parser;
    parser.setApplicationDescription("Generates random dataset files for mergesort algorithm testing");
    parser.addHelpOption();
    parser.addVersionOption();
    parser.addPositionalArgument("destination", "Filename of where to place the generated data");
    parser.addPositionalArgument("num_values", "The power of 10 for the number of values to generate");
    parser.process(app);

    const QStringList args = parser.positionalArguments();

    if (args.length() != 2) {
        parser.showHelp(-1);
    }

    const QString dest = args.at(0);
    bool convOK;
    const int pow_value = args.at(1).toInt(&convOK);
    if (!convOK) {
        parser.showHelp(-1);
    }

    const int num_values = std::pow(10, pow_value);

    QTextStream print(stdout);

    print << "Writing " << num_values << " values into " << dest << Qt::endl;

    QFile file(dest);
    if (!file.open(QIODevice::WriteOnly | QIODevice::Text)) {
        print << "Error opening file: " << file.errorString() << Qt::endl;
        app.exit(-1);
        return app.exec();
    }

    QTextStream out(&file);
    for (int i: std::views::iota(0, num_values)) {
        out << QString::number(QRandomGenerator::global()->generate()) << '\n';
    }
    file.flush();

    QFileInfo finfo(file);
    print << "Wrote " << num_values << " to " << dest << " with resulting size of " << (finfo.size() / 1000000) << " mb"
          << Qt::endl;
    file.close();


    app.exit(0);
    return 0;
}
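Assuming the binary name matches the target, an invocation like ./task1-randgen dataset.dat 6 would write 10^6 random unsigned 32-bit values, one per line, into dataset.dat and print the resulting file size.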
task1/src/task1-sorter.cpp (new file, 123 lines)
@@ -0,0 +1,123 @@
#include <QRandomGenerator>
#include <QCommandLineParser>
#include <QCoreApplication>
#include <QFile>
#include <QFileInfo>

#include <cmath>
#include <thread>
#include <ranges>
#include <chrono>

#include <mergesort_mt.h>

auto main(int argc, char *argv[]) -> int {
    QCoreApplication app(argc, argv);
    QCoreApplication::setApplicationName("Multi purpose mergesort application");
    QCoreApplication::setApplicationVersion("1.0.42");
    QTextStream print(stdout);


    QCommandLineParser parser;
    parser.setApplicationDescription(
            "Used to run either sequential or parallel mergesort on a texfile containing ascii encoded int32s");
    parser.addHelpOption();
    parser.addVersionOption();

    QCommandLineOption sequential("S", "Run sequential sort on dataset");
    QCommandLineOption parallel("P", "Run parallel sort on dataset");
    QCommandLineOption nthreads(QStringList() << "d" << "depth", "Recursion depth of parallel part", "nthreads");
    QCommandLineOption output(QStringList() << "o", "output", "File to write the sorted dataset to");

    parser.addOption(sequential);
    parser.addOption(parallel);
    parser.addOption(nthreads);
    parser.addOption(output);

    parser.addPositionalArgument("dataset", "Filename where to load the data from");
    parser.process(app);

    const QStringList args = parser.positionalArguments();

    if (args.length() != 1) {
        parser.showHelp(-1);
    }

    const QString source = args.at(0);
    QFile input(source);
    if (!input.open(QIODevice::ReadOnly | QIODevice::Text)) {
        print << "Could not open file " << source << " for reading" << Qt::endl;
        print << input.errorString();
        app.exit(-1);
        return app.exec();
    }

    std::vector<int32_t> dataset;

    QTextStream stream(&input);
    while (!stream.atEnd()) {
        QString line = stream.readLine();
        bool ok;
        int parsed_value = line.toUInt(&ok);
        if (!ok) {
            print << "Error converting value: " << line << Qt::endl;
        } else {
            dataset.push_back(std::move(parsed_value));
        }
    }

    print << "Read " << dataset.size() << " values from " << source << Qt::endl;

    const int threads = std::thread::hardware_concurrency();
    int max_depth = std::sqrt(threads);

    print << "Hardware concurrency of " << threads << " detected" << Qt::endl;

    if (parser.isSet(nthreads)) {
        bool ok;
        max_depth = parser.value(nthreads).toInt(&ok);
        if (!ok) {
            parser.showHelp(-1);
        }
        print << "Overwriting maximum parallelized recursion depth with " << max_depth << Qt::endl;
    } else {
        print << "Assuming default parallelized recursion depth via sqrt(nthreads) of " << max_depth << Qt::endl;
    }

    if (parser.isSet(sequential)) {
        auto buf = dataset;
        auto t1 = std::chrono::high_resolution_clock::now();
        MergeSorterMT<int32_t> sorter(
                [](int32_t a, int32_t b) {
                    return (a > b);
                }, 0);
        sorter.sort(buf);
        auto t2 = std::chrono::high_resolution_clock::now();
        auto diff = t2 - t1;

        print << "=> Duration for sequential sort: " << std::chrono::duration_cast<std::chrono::milliseconds>(diff).count() << " ms" << Qt::endl;
    }

    if (parser.isSet(parallel)) {
        auto buf = dataset;
        auto t1 = std::chrono::high_resolution_clock::now();
        MergeSorterMT<int32_t> sorter(
                [](int32_t a, int32_t b) {
                    return (a > b);
                }, max_depth);
        sorter.sort(buf);
        auto t2 = std::chrono::high_resolution_clock::now();
        auto diff = t2 - t1;

        print << "=> Duration for parallel sort: " << std::chrono::duration_cast<std::chrono::milliseconds>(diff).count() << " ms" << Qt::endl;
    }

    if(parser.isSet(output)) {
        print << "Sooory, not yet implemented :( you might do it yourself!" << Qt::endl;
        app.exit(-1);
        return app.exec();
    }

    app.exit(0);
    return 0;
}
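Assuming a previously generated dataset file, something like ./task1-sorter -S -P -d 3 dataset.dat would time both the sequential run (max_depth 0) and a parallel run with the parallel recursion depth overridden to 3; without -d the depth defaults to the square root of the detected hardware concurrency. The -o option is accepted but, as the message in the source states, writing the sorted output is not implemented yet.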