commit 6014a65251290ae396840d28a7884059d2389d12
parent 0e63b7f7e00c38ad209ffb9e75a5e1476c482075
Author: Ivan Gankevich <i.gankevich@spbu.ru>
Date: Mon, 12 Apr 2021 12:30:50 +0300
intro wip
Diffstat:
2 files changed, 47 insertions(+), 0 deletions(-)
diff --git a/main.bib b/main.bib
@@ -0,0 +1,15 @@
+
+@InProceedings{llvm,
+ author = {Lattner, Chris and Adve, Vikram},
+ title = {LLVM: A Compilation Framework for Lifelong Program Analysis
+ \& Transformation},
+ year = {2004},
+ isbn = {0769521029},
+ publisher = {IEEE Computer Society},
+ address = {USA},
+ booktitle = {Proceedings of the International Symposium on Code Generation
+ and Optimization: Feedback-Directed and Runtime Optimization},
+ pages = {75},
+ location = {Palo Alto, California},
+ series = {CGO '04}
+}
diff --git a/main.tex b/main.tex
@@ -31,12 +31,44 @@
\keywords{%
TODO
+\and intermediate language
+\and C++
\and Guile.
}
\end{abstract}
\section{Introduction}
+There are many programming frameworks for parallel and distributed computing
+(TODO cite) that are successful both in industry and academia; however, these
+frameworks are isolated and self-contained. We believe that the main reason
+why there is no common denominator between them is the lack of a protocol or
+low-level language for distributed computations. For sequential computations
+we have bytecode (e.g.~LLVM~\cite{llvm}, Java bytecode, Guile bytecode) that
+serves as an intermediate, portable and universal representation of a
+programme written in any language; we also have assembler, which is
+non-portable and non-universal, but is still a popular intermediate
+representation. One important feature that bytecode and assembler lack is the
+ability to communicate between parallel processes. This communication is the
+common denominator on top of which all the frameworks are built, yet there is
+no universal low-level protocol or language that describes it.
+
+TODO expand
+
+In this paper we describe a low-level language and protocol called
+\emph{kernels} that is suitable for parallel and distributed computations. We
+implement kernels in C++ and build a reference cluster scheduler that uses
+kernels as the protocol to run applications spanning multiple cluster nodes.
+Then we use kernels as an intermediate representation for the Guile
+programming language, run benchmarks using the scheduler and compare the
+performance of different implementations of the same programme.
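+
+To give the flavour of the approach, the following sketch shows what a kernel
+may look like in C++; the class and member names (\texttt{act},
+\texttt{react}, \texttt{parent}) are illustrative only.
+\begin{verbatim}
+// Illustrative sketch of a kernel: a unit of work that can be
+// scheduled on any cluster node and that reports its result back
+// to the parent kernel that created it.
+class kernel {
+public:
+    virtual ~kernel() = default;
+    // called when the kernel is scheduled for execution
+    virtual void act() {}
+    // called when a child kernel finishes and returns to its parent
+    virtual void react(kernel* child) {}
+    kernel* parent = nullptr;
+};
+\end{verbatim}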
+
+Spark
+
+Distributed Haskell
+
+
+
\section{Methods}
\section{Results}