Initial Commit

This commit is contained in:
Robin Dietzel 2023-10-31 14:48:54 +01:00
commit e0de035a9c
5 changed files with 121 additions and 0 deletions

3
.gitignore vendored Normal file
View File

@@ -0,0 +1,3 @@
# IDE/build output directories
cmake-build-debug
.idea
# Generated input data (produced by task1/dataset-gen.py); not tracked
task1/dataset.dat

30
CMakeLists.txt Normal file
View File

@@ -0,0 +1,30 @@
# Build configuration for task1: mergesort benchmark using Qt6 Core and fmt.
# Qt6 itself requires CMake >= 3.16, so 3.0 was too low a floor.
cmake_minimum_required(VERSION 3.16)
project(task1 LANGUAGES CXX)

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_AUTOMOC ON)
set(CMAKE_AUTORCC ON)
set(CMAKE_AUTOUIC ON)
set(CMAKE_INCLUDE_CURRENT_DIR ON)

find_package(Qt6 COMPONENTS
        Core
        REQUIRED)
# fmt::fmt is linked below unconditionally, so the package must be REQUIRED
# (otherwise a missing fmt only surfaces as a cryptic link-time error).
find_package(fmt REQUIRED)

add_executable(task1_7 task1/main.cpp)

# Copy the input dataset next to the binary whenever the source copy changes.
# Paths are absolute so OUTPUT/DEPENDS resolve identically on every generator,
# and ${CMAKE_COMMAND} guarantees the running cmake binary is used.
add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/dataset.dat
        DEPENDS ${CMAKE_SOURCE_DIR}/task1/dataset.dat
        COMMAND ${CMAKE_COMMAND} -E copy_if_different
                ${CMAKE_SOURCE_DIR}/task1/dataset.dat
                ${CMAKE_BINARY_DIR}/dataset.dat
        COMMENT "Copying dataset"
        VERBATIM)
add_custom_target(task1_7_dataset DEPENDS ${CMAKE_BINARY_DIR}/dataset.dat)
# add_dependencies only orders the build (it does not link anything) — it makes
# sure the dataset is staged before/whenever the executable is built.
add_dependencies(task1_7 task1_7_dataset)

# Explicit PRIVATE visibility: an executable has no consumers to propagate to.
target_link_libraries(task1_7 PRIVATE
        fmt::fmt
        Qt6::Core)

31
docker/Dockerfile Normal file
View File

@@ -0,0 +1,31 @@
ARG UBUNTU_VERSION="23.04"
FROM ubuntu:${UBUNTU_VERSION}

# Build environment: profiling and analyzer tools.
# FIX(review): the original used --mount=type=bind,target=/var/lib/apt, which
# bind-mounts the build context over apt's state directory. Cache mounts are
# what keep apt lists/archives between builds without baking them into a layer.
RUN --mount=type=cache,target=/var/lib/apt,sharing=locked \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    apt-get -y update \
    && DEBIAN_FRONTEND=noninteractive TZ=Europe/Berlin apt-get -y install tzdata apt-utils \
    && apt-get -y upgrade \
    && apt-get -y install python3 python3-pip lsb-release software-properties-common \
        rename wget git ninja-build make gdb gdbserver pkg-config \
        gperf linux-tools-generic linux-tools-common valgrind \
    && apt-get clean

# CMake: pinned release installed via Kitware's installer script.
ENV CMAKE_VERSION="3.28.0-rc2"
RUN wget -P /opt https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.sh \
    && bash /opt/cmake-${CMAKE_VERSION}-linux-x86_64.sh --skip-license --prefix=/usr \
    && rm /opt/cmake-${CMAKE_VERSION}-linux-x86_64.sh

# Conan. The ARG must be re-declared after FROM to be visible in this stage.
ENV CONAN_VERSION="1.61.0"
ARG UBUNTU_VERSION
SHELL ["/bin/bash", "-c"]
# Ubuntu >= 23.04 ships a PEP 668 "externally managed" Python, so pip needs
# --break-system-packages there; older releases reject that flag.
RUN if [[ ${UBUNTU_VERSION} == "23.04" || ${UBUNTU_VERSION} == "23.10" ]]; then \
        pip3 install --break-system-packages conan==${CONAN_VERSION}; else \
        pip3 install conan==${CONAN_VERSION}; fi

# GCC toolchain.
# FIX(review): refresh the package lists first — the lists fetched in the first
# RUN lived under the (cache/bind) mount and are not part of this image, so a
# bare `apt-get install` here would fail with "Unable to locate package".
ARG GCC_VERSION="13"
RUN apt-get -y update \
    && apt-get -y install gcc-${GCC_VERSION} g++-${GCC_VERSION} \
    && apt-get clean

14
task1/dataset-gen.py Normal file
View File

@@ -0,0 +1,14 @@
import numpy as np
import os

# Deterministic dataset generator for task1: 10^7 random integers, one per
# line, written to dataset.dat (seeded so every run produces the same file).
randomizer = np.random.default_rng(42)
# NOTE(review): high=2**32 yields values up to 2^32 - 1, which overflow the
# signed 32-bit ints the consumer (task1/main.cpp, std::vector<int32_t>)
# expects — confirm whether high=2**31 was intended.
randvec = randomizer.integers(low=0, high=pow(2, 32), size=pow(10, 7))
print("Generated random vector with {} entries".format(randvec.size))

# savetxt with fmt="%d" emits exactly one decimal integer plus newline per
# element — identical output to the original Python write loop, but the
# formatting happens in C instead of 10^7 interpreter iterations.
np.savetxt("dataset.dat", randvec, fmt="%d")

fsize = os.path.getsize("dataset.dat")
print("File written down with Size {} MB".format(fsize / 1000 / 1000))

43
task1/main.cpp Normal file
View File

@@ -0,0 +1,43 @@
#include <fmt/format.h>
#include <QCoreApplication>
#include <QFile>
#include <cstdint>  // int32_t — previously pulled in only transitively
#include <cstdio>   // stderr
#include <vector>
/*
Create a simple sorting application that uses the mergesort algorithm to sort a
large collection (e.g., 10^7) of 32-bit integers. The input data and output results
should be stored in files, and the I/O operations should be considered a
sequential part of the application. Mergesort is an algorithm that is considered
appropriate for parallel execution, although it cannot be equally divided between
an arbitrary number of processors, as Amdahl's and Gustafson-Barsis' laws
require.
Assuming that this equal division is possible, estimate α, i.e., the part of the
program that can be parallelized, by using a profiler like gprof or valgrind to
measure the duration of mergesort's execution relative to the overall execution
time. Use this number to estimate the predicted speedup for your program.
Does α depend on the size of the input? If it does, how should you modify
your predictions and their graphical illustration?
*/

// Loads dataset.dat (one integer per line) into a std::vector<int32_t>.
// Returns 0 on success, 1 if the dataset cannot be opened.
int main(int argc, char *argv[]) {
    QFile file("dataset.dat");
    if (!file.open(QIODevice::ReadOnly | QIODevice::Text)) {
        // Report the failure and signal it via the exit code (the original
        // silently returned 0, indistinguishable from success).
        fmt::print(stderr, "Could not open file {}\n", file.fileName().toStdString());
        return 1;
    }
    fmt::print("Opened file {} successfully!\n", file.fileName().toStdString());

    std::vector<int32_t> dataset;
    dataset.reserve(10'000'000);  // expected size (see dataset-gen.py); avoids repeated reallocation
    while (!file.atEnd()) {
        // NOTE(review): QByteArray::toInt returns 0 for values that do not fit
        // in an int; the generator emits values up to 2^32 - 1, so large
        // entries silently become 0 — confirm the generator's upper bound.
        dataset.emplace_back(file.readLine().toInt());
    }
    fmt::print("Read {} values from {}\n", dataset.size(), file.fileName().toStdString());
    return 0;
}