% $Id: parallelization.tex 973 2012-08-07 16:03:47Z gryschka $
\input{header_tmp.tex}
%\input{../header_lectures.tex}

\usepackage[utf8]{inputenc}
\usepackage{ngerman}
\usepackage{pgf}
\usetheme{Dresden}
\usepackage{subfigure}
\usepackage{units}
\usepackage{multimedia}
\usepackage{hyperref}
\newcommand{\event}[1]{\newcommand{\eventname}{#1}}
\usepackage{xmpmulti}
\usepackage{tikz}
\usetikzlibrary{shapes,arrows,positioning}
\usetikzlibrary{decorations.markings}       % new package
\usetikzlibrary{decorations.pathreplacing}  % new package
\def\Tiny{\fontsize{4pt}{4pt}\selectfont}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{multicol}
\usepackage{pdfcomment}
\usepackage{graphicx}
\usepackage{listings}
\lstset{showspaces=false,language=fortran,basicstyle=\ttfamily,showstringspaces=false,captionpos=b}

\institute{Institut für Meteorologie und Klimatologie, Leibniz Universität Hannover}
\date{last update: \today}
\event{PALM Seminar}
\setbeamertemplate{navigation symbols}{}

\setbeamertemplate{footline}
  {
    \begin{beamercolorbox}[rightskip=-0.1cm]{}
      {\includegraphics[height=0.65cm]{imuk_logo.pdf}\hfill \includegraphics[height=0.65cm]{luh_logo.pdf}}
    \end{beamercolorbox}
    \begin{beamercolorbox}[ht=2.5ex,dp=1.125ex, leftskip=.3cm,rightskip=0.3cm plus1fil]{title in head/foot}
      {\leavevmode{\usebeamerfont{author in head/foot}\insertshortauthor} \hfill \eventname \hfill \insertframenumber \; / \inserttotalframenumber}
    \end{beamercolorbox}
    \begin{beamercolorbox}[colsep=1.5pt]{lower separation line foot}
    \end{beamercolorbox}
  }
%\logo{\includegraphics[width=0.3\textwidth]{luhimuk_logo.pdf}}

\title[Parallelization]{Parallelization}
\author{Siegfried Raasch}

\begin{document}

% Slide 1
\begin{frame}
 \titlepage
\end{frame}

\section{Parallelization}
\subsection{Parallelization}

% Slide 2
\begin{frame}
 \frametitle{Basics of Parallelization}
 \tikzstyle{yellow} = [rectangle, fill=yellow!20, text width=0.6\textwidth, font=\scriptsize]
 \scriptsize
 \textbf{Parallelization:}
 \begin{itemize}
  \item<2-> All processor elements (PEs, cores) carry out the same program (SPMD: single program, multiple data).
  \item<3-> Each PE of a parallel computer operates on a different set of data.
 \end{itemize}
 \ \\
 \onslide<4->\textbf{Realization:}
 \begin{columns}[T]
  \begin{column}{0.45\textwidth}
   \onslide<5->each PE solves the equations for a different subdomain of the total domain
   \begin{center}
    \includegraphics[width=0.5\textwidth]{parallelization_figures/subdomain.png}
   \end{center}
   \onslide<7->each PE only knows the variable values of its own subdomain; communication / data exchange between PEs is necessary\\
   \onslide<9->\textbf{message passing model (MPI)}
  \end{column}
  \begin{column}{0.45\textwidth}
   \onslide<6->program loops are parallelized, i.e. each processor solves for a subset of the total index range
   \begin{center}
    \begin{tikzpicture}[auto, node distance=0]
     \node [yellow] (1) {%
      \texttt{!\$OMP DO}\\
      \texttt{\quad \quad DO i = 1, 100}\\
      \quad \quad \quad $\vdots$\\
      \texttt{\quad \quad ENDDO}};
    \end{tikzpicture}
   \end{center}
   \onslide<8-> parallelization can easily be done by the compiler if all PEs have access to all variables (shared memory)\\
   \onslide<10-> \textbf{shared memory model (OpenMP)} {\tiny (a minimal example follows on the next slide)}
  \end{column}
 \end{columns}
\end{frame}
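% Slide 2a (added example)
\begin{frame}[fragile]
 \frametitle{Added Example: A Minimal OpenMP Loop}
 \scriptsize
 This stand-alone Fortran sketch is \emph{not} part of PALM; it merely illustrates the shared-memory loop
 parallelization sketched on the previous slide. It has to be compiled with the compiler's OpenMP option.
 \begin{lstlisting}[basicstyle=\tiny\ttfamily]
PROGRAM openmp_sketch
!-- Minimal sketch (not PALM code): the iterations of the loop are
!-- distributed among the threads; all threads access the shared array a.
   IMPLICIT NONE
   INTEGER ::  i
   REAL, DIMENSION(100) ::  a

!$OMP PARALLEL DO
   DO  i = 1, 100
      a(i) = REAL( i )
   ENDDO
!$OMP END PARALLEL DO

   PRINT*, 'sum = ', SUM( a )
END PROGRAM openmp_sketch
 \end{lstlisting}
\end{frame}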
% Slide 3
\begin{frame}
 \frametitle{Basic Architectures of Massively Parallel Computers}
 \tikzstyle{info} = [rectangle, text width=0.25\textwidth, font=\scriptsize]
 \begin{center}
  \begin{tikzpicture}
   \node (center) at (0,1) {};
   \onslide<2->
   \node (Network) at (-3.5,1) [draw, ellipse,fill=green!20] {Network};
   \node (dis_mem) at (-3.5,-1) [text width=0.28\textwidth] {\footnotesize \textbf{distributed} memory\\(Cray-T3E)};
   \onslide<3->
   \node (add_mem) at (3.5,1) [rectangle, draw] {addressable memory};
   \node (sha_mem) at (3.5,-1) [text width=0.35\textwidth] {\footnotesize \textbf{shared} memory\\(SGI-Altix, multicore PCs)};
   \onslide<7->
   \node (MPI) at (-3.5,-3) [ellipse,fill=yellow!90] {MPI};
   \onslide<8->
   \node (OpenMP) at (3.5,-3) [ellipse,fill=yellow!90] {OpenMP};
   \onslide<6->
   \node (clustered_systems) at (0,-3) [draw, text width=0.15\textwidth] {clustered systems};
   \node (cs_info) at (0,-4.2) [text width=0.4\textwidth] {\footnotesize (IBM-Regatta, Linux-Cluster, NEC-SX, SGI-ICE, Cray-XE6)};
   % Addressable memory node (big)
   \onslide<3->
   \node (p1) at (2,-0.05) [draw,circle, scale=0.9] {\scriptsize p};
   \node (p2) at (2.6,-0.05) [draw,circle, scale=0.9] {\scriptsize p};
   \node (p3) at (3.2,-0.05) [draw,circle, scale=0.9] {\scriptsize p};
   \node (p4) at (3.8,-0.05) [draw,circle, scale=0.9] {\scriptsize p};
   \node (p5) at (4.4,-0.05) [draw,circle, scale=0.9] {\scriptsize p};
   \node (p6) at (5,-0.05) [draw,circle, scale=0.9] {\scriptsize p};
   \draw[-] (3.5,0.7) -- (3.5,0.4);
   \draw[-] (2,0.4) -- (5,0.4);
   \draw[-] (2,0.4) -- (p1);
   \draw[-] (2.6,0.4) -- (p2);
   \draw[-] (3.2,0.4) -- (p3);
   \draw[-] (3.8,0.4) -- (p4);
   \draw[-] (4.4,0.4) -- (p5);
   \draw[-] (5,0.4) -- (p6);
   % Addressable memory node (small)
   \onslide<4->
   \node (small_node) at (-2,0.6) [scale=0.2] {%
    \begin{tikzpicture}
     \node (add_mem_small) at (3.5,0.9) [ultra thick, rectangle, draw, minimum width=3cm] {};
     \node (p1_small) at (2,-0.05) [ultra thick, draw,circle, scale=0.9] {};
     \node (p2_small) at (2.6,-0.05) [ultra thick, draw,circle, scale=0.9] {};
     \node (p3_small) at (3.2,-0.05) [ultra thick, draw,circle, scale=0.9] {};
     \node (p4_small) at (3.8,-0.05) [ultra thick, draw,circle, scale=0.9] {};
     \node (p5_small) at (4.4,-0.05) [ultra thick, draw,circle, scale=0.9] {};
     \node (p6_small) at (5,-0.05) [ultra thick, draw,circle, scale=0.9] {};
     \draw[-, ultra thick] (add_mem_small.south) -- (3.5,0.4);
     \draw[-, ultra thick] (2,0.4) -- (5,0.4);
     \draw[-, ultra thick] (2,0.4) -- (p1_small);
     \draw[-, ultra thick] (2.6,0.4) -- (p2_small);
     \draw[-, ultra thick] (3.2,0.4) -- (p3_small);
     \draw[-, ultra thick] (3.8,0.4) -- (p4_small);
     \draw[-, ultra thick] (4.4,0.4) -- (p5_small);
     \draw[-, ultra thick] (5,0.4) -- (p6_small);
    \end{tikzpicture}
   } ;
   \draw[->, thick] (1.5,0.2) -- (small_node) ;
   \draw[-] (-2.7,0.75) -- (-2.3,0.725);
   \onslide<5->
   \node[below=-0.1cm of small_node] (add_info) [scale=0.9] {\scriptsize node};
   % Black Arrows
   \onslide<6->
   \draw[->, thick] (-2.5,-1.5) -- (-0.8,-2.2) ;
   \draw[->, thick] (2.5,-1.5) -- (0.8,-2.2) ;
   % MPI Arrows
   \onslide<7->
   \draw[->, ultra thick, color=yellow] (-3.5,-2.7) -- (-3.5,-1.5) ;
   \draw[->, ultra thick, color=yellow] (-2.9,-3) -- (-1.0,-3.0) ;
   \draw[->, ultra thick, color=yellow] (-3.0,-2.8) -- (1.5,-1.0) ;
   % OpenMP Arrows
   \onslide<8->
   \draw[->, ultra thick, color=yellow] (3.5,-2.6) -- (3.5,-1.5) ;
   \draw[->, ultra thick, color=yellow] (2.5,-2.8) -- (-2.0,0.1) ;
   % Network decorations
   \onslide<2->
   \node (pr1) at (-4.6,0.7) [draw,circle,scale=0.5] {};
   \node (mem1) at (-4.6,0.45) [draw,rectangle,scale=0.5] {};
   \draw[-] (-4.5,0.9) -- (pr1);
   \draw[-] (mem1) -- (pr1);
   \draw[-] ([xshift=0.02cm]pr1.east) -- ([xshift=0.3cm, yshift=-0.2cm]pr1.east);
   \draw[-] ([xshift=0.02cm]mem1.east) -- ([xshift=0.3cm, yshift=-0.2cm]mem1.east);
   \node at (-3.7,0.45) {\tiny processor};
   \node at (-3.3,0.25) {\tiny addressable memory};
   \node (pr2) at (-4.6,1.3) [draw,circle,scale=0.5] {};
   \node (mem2) at (-4.6,1.55) [draw,rectangle,scale=0.5] {};
   \draw[-] (-4.5,1.1) -- (pr2);
   \draw[-] (mem2) -- (pr2);
   \node (pr3) at (-3.9,1.5) [draw,circle,scale=0.5] {};
   \node (mem3) at (-3.9,1.75) [draw,rectangle,scale=0.5] {};
   \draw[-] (-3.8,1.3) -- (pr3);
   \draw[-] (mem3) -- (pr3);
   \node (pr4) at (-4.9,0.95) [draw,circle,scale=0.5] {};
   \node (mem4) at (-4.9,0.7) [draw,rectangle,scale=0.5] {};
   \draw[-] (-4.55,1.0) -- (pr4);
   \draw[-] (mem4) -- (pr4);
   \node (pr5) at (-3.0,1.5) [draw,circle,scale=0.5] {};
   \node (mem5) at (-3.0,1.75) [draw,rectangle,scale=0.5] {};
   \draw[-] (-3.08,1.3) -- (pr5);
   \draw[-] (mem5) -- (pr5);
   \node (pr6) at (-2.2,1.1) [draw,circle,scale=0.5] {};
   \node (mem6) at (-2.2,1.35) [draw,rectangle,scale=0.5] {};
   \draw[-] (-2.45,1.0) -- (pr6);
   \draw[-] (mem6) -- (pr6);
   \onslide<1->
  \end{tikzpicture}
 \end{center}
\end{frame}

% Slide 4
\begin{frame}
 \frametitle{PALM Parallelization Model}
 \scriptsize
 \onslide<2-> \textbf{General demands for a parallelized program:}
 \begin{itemize}
  \item<3-> Load balancing
  \item<4-> Small communication overhead
  \item<5-> Scalability (up to large numbers of processors)
 \end{itemize}
 \vspace{2mm}
 \onslide<6-> \textbf{The basic parallelization method used for PALM is a 2D-domain decomposition along $x$ and $y$:}\\
 \begin{columns}[T]
  \begin{column}{0.3\textwidth}
   \onslide<7-> \includegraphics[width=1.0\textwidth]{parallelization_figures/subdomain.png}
  \end{column}
  \begin{column}{0.6\textwidth}
   \ \\
   \onslide<8-> contiguous data in memory (FORTRAN):\\
   \ \\
   \ \\
   \textcolor{blue}{columns of i}\\
   \textcolor{red}{no contiguous data at all}\\
   \onslide<9->
   \textcolor{blue}{columns of k}\\
   \textcolor{red}{planes of k,j (all data contiguous)}
  \end{column}
 \end{columns}
 \vspace{2mm}
 \begin{itemize}
  \item<10-> Alternatively, a 1D-decomposition along $x$ or $y$ may be used in case of slow networks, but this generally does not scale for processor numbers $>$ 256.
  \vspace{2mm}
  \item<11-> Message passing is realized using MPI.
  \vspace{2mm}
  \item<12-> OpenMP parallelization, as well as mixed usage of OpenMP and MPI, is also possible (OpenMP tests and optimizations are under way).
 \end{itemize}
 \onslide<12-> {\tiny (A sketch of how the subdomain index bounds follow from the processor grid is given on the next slide.)}
\end{frame}
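% Slide 4a (added example)
\begin{frame}[fragile]
 \frametitle{Added Example: Subdomain Index Bounds (Sketch)}
 \scriptsize
 A simplified, stand-alone sketch, \emph{not} the PALM code (the real and more general computation is done in
 \texttt{init\_pegrid.f90}): it assumes that the number of grid points along $x$ and $y$ is evenly divisible by
 \texttt{npex} and \texttt{npey}, and it assumes a simple row-major ordering of the processor ids. PALM itself
 takes the processor coordinates from the virtual processor grid introduced on the following slides.
 \begin{lstlisting}[basicstyle=\tiny\ttfamily]
PROGRAM decomposition_sketch
!-- Hypothetical example: subdomain index bounds of each PE on a
!-- npex * npey processor grid for a total domain with indices 0..nx, 0..ny
   IMPLICIT NONE
   INTEGER, PARAMETER ::  nx = 1535, ny = 767   ! total domain
   INTEGER, PARAMETER ::  npex = 8, npey = 8    ! virtual processor grid
   INTEGER ::  myid, nnx, nny, nxl, nxr, nys, nyn, pcoord_x, pcoord_y

   nnx = ( nx + 1 ) / npex          ! subdomain size along x
   nny = ( ny + 1 ) / npey          ! subdomain size along y

   DO  myid = 0, npex * npey - 1
      pcoord_x = myid / npey        ! position on the processor grid
      pcoord_y = MOD( myid, npey )  ! (assuming row-major rank order)
      nxl = pcoord_x * nnx          ! left/right index bounds
      nxr = nxl + nnx - 1
      nys = pcoord_y * nny          ! south/north index bounds
      nyn = nys + nny - 1
      PRINT*, myid, nxl, nxr, nys, nyn
   ENDDO
END PROGRAM decomposition_sketch
 \end{lstlisting}
\end{frame}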
% Slide 5
\begin{frame}
 \frametitle{Implications of Decomposition}
 \scriptsize
 \begin{columns}[T]
  \begin{column}{0.5\textwidth}
   \begin{itemize}
    \item<2-> Central finite differences cause \textcolor{red}{local data dependencies}\\
     \ \\
     solution: introduction of \textcolor{red}{ghost points}
     \vspace{5mm}
     \begin{flushright}
      \onslide<3-> $\left. \dfrac{\partial \psi}{\partial x} \right|_i = \dfrac{\psi_{i+1} - \psi_{i-1}}{2 \Delta x}$
     \end{flushright}
     \vspace{10mm}
    \item<4-> FFT and linear equation solver cause \textcolor{red}{non-local data dependencies}\\
     \ \\
     solution: transposition of 3D-arrays
   \end{itemize}
  \end{column}
  \begin{column}{0.6\textwidth}
   \begin{center}
    \vspace{-5mm}
    \onslide<3-> \includegraphics[width=0.7\textwidth]{parallelization_figures/ghost_points.png}
    \vspace{4mm}
    \onslide<5-> \includegraphics[width=0.8\textwidth]{parallelization_figures/fft.png}
   \end{center}
   \vspace{-4mm}
   \textbf{Example: transpositions for solving the Poisson\\ \hspace{4em}equation}
  \end{column}
 \end{columns}
\end{frame}

% Slide 6
\begin{frame}
 \frametitle{How to Use the Parallelized Version of PALM}
 \scriptsize
 \begin{columns}[T]
  \begin{column}{1.12\textwidth}
   \begin{itemize}
    \item<1-> The parallel version of PALM is switched on by the \texttt{mrun}-option ``\texttt{-K parallel}''. Additionally, the number of required processors and the number of tasks per node (number of PEs to be used on one node) have to be provided:\\
     \quad \texttt{mrun ... -K parallel -X64 -T8 ...}
    \item<2-> From an accounting point of view, it is always most efficient to use all PEs of a node (\texttt{-T8}) (in case of a ``non-shared'' usage of nodes).
    \item<3-> If a normal Unix-kernel operating system (not a micro-kernel) is running on each CPU, the code might speed up if 1-2 fewer PEs than the total number available on the node are used.
    \item<4-> On machines with a comparably slow network, a 1D-decomposition (along $x$) should be used, because then only two transpositions have to be carried out by the pressure solver. A 1D-decomposition is automatically used for NEC machines (e.g. \texttt{-h necriam}). The virtual processor grid to be used can be set manually by the d3par-parameters \texttt{npex} and \texttt{npey}.
    \item<6-> Using the OpenMP parallelization does not yield any advantage over using a pure domain decomposition with MPI (contrary to expectations, it mostly slows down the computational speed), but this may change on cluster systems for very large numbers of processors ($>$10000?).\\
   \end{itemize}
   \begin{center}
    \vspace{-7mm}
    \onslide<5-> \includegraphics[width=0.13\textwidth]{parallelization_figures/folie_6.png}
   \end{center}
  \end{column}
 \end{columns}
\end{frame}

% Slide 7
\begin{frame}
 \frametitle{MPI Communication}
 \scriptsize
 \begin{columns}[T]
  \begin{column}{1.12\textwidth}
   \begin{itemize}
    \item<1-> MPI (Message Passing Interface) is a portable interface for communication between PEs (FORTRAN or C library).
    \vspace{2mm}
    \item<2-> To make MPI available on HLRN's SGI-ICE, the module \texttt{mpt} must be loaded by setting the \texttt{\%modules} option in \texttt{.mrun.config} appropriately: \quad \texttt{\%modules ...:mpt:...}
    \vspace{2mm}
    \item<2-> The path to the MPI library may have to be given in the compiler call, by setting an appropriate option in the configuration file \texttt{.mrun.config}: \quad \texttt{\%lopts -axW:-cpp:-r8:-nbs:-Vaxlib:\textcolor{blue}{-L::-lmpi}}
    \vspace{2mm}
    \item<3-> All MPI calls must be within\\
     \quad \texttt{CALL MPI\_INIT( ierror )}\\
     \quad $\vdots$\\
     \quad \texttt{CALL MPI\_FINALIZE( ierror )}\\
     {\tiny (a minimal complete program is sketched on the next slide)}
   \end{itemize}
  \end{column}
 \end{columns}
\end{frame}
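% Slide 7a (added example)
\begin{frame}[fragile]
 \frametitle{Added Example: A Minimal MPI Program}
 \scriptsize
 A stand-alone sketch, \emph{not} part of PALM, of the MPI framework shown on the previous slide: every PE
 executes the same program and determines its own id (rank) and the total number of PEs.
 \begin{lstlisting}[basicstyle=\tiny\ttfamily]
PROGRAM mpi_skeleton
!-- Minimal sketch of an MPI program (not PALM code)
   IMPLICIT NONE
   INCLUDE 'mpif.h'                  ! or: USE mpi
   INTEGER ::  ierror, myid, numprocs

   CALL MPI_INIT( ierror )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierror )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierror )

   PRINT*, 'PE ', myid, ' of ', numprocs

   CALL MPI_FINALIZE( ierror )
END PROGRAM mpi_skeleton
 \end{lstlisting}
\end{frame}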
% Slide 8
\begin{frame}
 \frametitle{Communication Within PALM}
 \small
 \begin{itemize}
  \item<1-> MPI calls within PALM are available when using the \texttt{mrun}-option ``\texttt{-K parallel}''.
  \item<2-> Communication is needed for
   \begin{itemize}
    \footnotesize
    \item<2-> exchange of ghost points
    \item<3-> transpositions (FFT Poisson solver)
    \item<4-> calculating global sums (e.g. for calculating horizontal averages)
   \end{itemize}
  \item<5-> Additional MPI calls are required to define the so-called virtual processor grid and to define special data types needed for a more convenient exchange of data.
 \end{itemize}
\end{frame}

% Slide 9
\begin{frame}
 \frametitle{Virtual Processor Grid Used in PALM}
 \scriptsize
 \vspace{2mm}
 The processor grid and special data types are defined in file \texttt{init\_pegrid.f90}.\\
 \ \\
 \begin{itemize}
  \item<2-> PALM uses a two-dimensional virtual processor grid (in case of a 1D-decomposition, it has only one element along $y$). It is defined by a so-called communicator (here: \texttt{comm2d}):\\
   \tiny
   \vspace{1.5mm}
   \quad \texttt{ndim = 2}\\
   \quad \texttt{pdims(1) = npex \quad ! \# of processors along x}\\
   \quad \texttt{pdims(2) = npey \quad ! \# of processors along y}\\
   \quad \texttt{cyclic(1) = .TRUE.}\\
   \quad \texttt{cyclic(2) = .TRUE.}\\
   \ \\
   \quad \texttt{CALL MPI\underline{\ }CART\underline{\ }CREATE( MPI\underline{\ }COMM\underline{\ }WORLD, ndim, pdims, cyclic, reorder, \&}\\
   \quad \texttt{\hspace{10.5em} \textcolor{blue}{comm2d}, ierr )}
   \scriptsize
   \vspace{4mm}
  \item<3-> The processor number (id) with respect to this processor grid, \texttt{myid}, is given by:\\
   \tiny
   \vspace{1.5mm}
   \quad \texttt{CALL MPI\underline{\ }COMM\underline{\ }RANK( comm2d, \textcolor{blue}{myid}, ierr )}
   \scriptsize
   \vspace{4mm}
  \item<4-> The ids of the neighbouring PEs are determined by:\\
   \tiny
   \vspace{1.5mm}
   \quad \texttt{CALL MPI\underline{\ }CART\underline{\ }SHIFT( comm2d, 0, 1, \textcolor{blue}{pleft}, \textcolor{blue}{pright}, ierr )}\\
   \quad \texttt{CALL MPI\underline{\ }CART\underline{\ }SHIFT( comm2d, 1, 1, \textcolor{blue}{psouth}, \textcolor{blue}{pnorth}, ierr )}\\
   \scriptsize
   (a stand-alone sketch combining these calls is shown on the next slide)
 \end{itemize}
\end{frame}
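% Slide 9a (added example)
\begin{frame}[fragile]
 \frametitle{Added Example: Virtual Processor Grid (Sketch)}
 \scriptsize
 A stand-alone sketch, \emph{not} the PALM code, which combines the calls shown on the previous slide: it creates
 a cyclic $2 \times 2$ virtual processor grid and determines the ids of the neighbouring PEs (to be started on 4 PEs).
 \begin{lstlisting}[basicstyle=\tiny\ttfamily]
PROGRAM cart_grid_sketch
   IMPLICIT NONE
   INCLUDE 'mpif.h'                          ! or: USE mpi
   INTEGER ::  comm2d, ierr, myid
   INTEGER ::  pleft, pright, psouth, pnorth
   INTEGER, DIMENSION(2) ::  pdims  = (/ 2, 2 /)
   LOGICAL, DIMENSION(2) ::  cyclic = (/ .TRUE., .TRUE. /)
   LOGICAL ::  reorder = .FALSE.

   CALL MPI_INIT( ierr )
!-- Create the two-dimensional, cyclic virtual processor grid
   CALL MPI_CART_CREATE( MPI_COMM_WORLD, 2, pdims, cyclic, reorder, &
                         comm2d, ierr )
!-- Rank of this PE with respect to the processor grid
   CALL MPI_COMM_RANK( comm2d, myid, ierr )
!-- Ids of the neighbouring PEs along the two directions
   CALL MPI_CART_SHIFT( comm2d, 0, 1, pleft,  pright, ierr )
   CALL MPI_CART_SHIFT( comm2d, 1, 1, psouth, pnorth, ierr )

   PRINT*, 'PE ', myid, ' neighbours (l,r,s,n): ', pleft, pright, psouth, pnorth
   CALL MPI_FINALIZE( ierr )
END PROGRAM cart_grid_sketch
 \end{lstlisting}
\end{frame}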
% Slide 10
\begin{frame}
 \frametitle{Exchange of ghost points}
 \scriptsize
 \begin{itemize}
  \item<1-> Ghost points are stored in additional array elements added at the horizontal boundaries of the subdomains, e.g.\\
   \tiny
   \vspace{2mm}
   \quad \texttt{u(:,:,nxl\textcolor{blue}{-ngl}), u(:,:,nxr\textcolor{blue}{+ngl}) ! left and right boundary}\\
   \quad \texttt{u(:,nys\textcolor{blue}{-ngl},:), u(:,nyn\textcolor{blue}{+ngl},:) ! south and north boundary}\\
   \vspace{4mm}
  \item<2-> \scriptsize The exchange of ghost points is done in file \texttt{exchange\underline{\ }horiz.f90}\\
   \textbf{\underline{Simplified} example:} synchronous exchange of ghost points along $x$ ($yz$-planes, send left, receive right plane):\\
   \tiny
   \vspace{2mm}
   \quad \texttt{CALL MPI\underline{\ }SENDRECV( ar(nzb,nys-\textcolor{blue}{ngl},nxl), ngp\underline{\ }yz, MPI\underline{\ }REAL, pleft, 0,}\\
   \quad \texttt{\hspace{9.5em}ar(nzb,nys-\textcolor{blue}{ngl},nxr+1), ngp\underline{\ }yz, MPI\underline{\ }REAL, pright, 0,}\\
   \quad \texttt{\hspace{9.5em}comm2d, status, ierr )}\\
   \vspace{4mm}
  \item<3-> \scriptsize In the real code, special MPI data types (vectors) are defined for the exchange of $yz$/$xz$-planes, for performance reasons and because the array elements to be exchanged are not stored consecutively in memory for $xz$-planes:\\
   \tiny
   \vspace{2mm}
   \quad \texttt{ngp\underline{\ }yz(0) = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * \textcolor{blue}{ngl} )}\\
   \quad \texttt{CALL MPI\underline{\ }TYPE\underline{\ }VECTOR( \textcolor{blue}{ngl}, ngp\underline{\ }yz(0), ngp\underline{\ }yz(0), MPI\underline{\ }REAL, type\underline{\ }yz(0), ierr )}\\
   \quad \texttt{CALL MPI\underline{\ }TYPE\underline{\ }COMMIT( type\underline{\ }yz(0), ierr ) ! see file init\underline{\ }pegrid.f90}\\
   \ \\
   \quad \texttt{CALL MPI\underline{\ }SENDRECV( ar(nzb,nys-ngl,nxl), 1, type\underline{\ }yz(grid\underline{\ }level), pleft, 0, ...}\\
 \end{itemize}
\end{frame}

% Slide 11
\begin{frame}
 \frametitle{Transpositions}
 \footnotesize
 \begin{columns}[T]
  \begin{column}{1.05\textwidth}
   \begin{itemize}
    \item<1-> Transpositions can be found in file \texttt{transpose.f90} (several subroutines for 1D- or 2D-decompositions; they are called mainly from the FFT pressure solver, see \texttt{poisfft.f90}).\\
     \ \\
    \item<2-> The following example is for a transposition from $x$ to $y$, i.e. for the input array all data elements along $x$ reside on the same PE, while after the transposition, all elements along $y$ are on the same PE:\\
     \ \\
     \scriptsize
     \texttt{!}\\
     \texttt{!-- in SUBROUTINE transpose\underline{\ }xy:}\\
     \texttt{CALL MPI\underline{\ }ALLTOALL( f\underline{\ }inv(nys\underline{\ }x,nzb\underline{\ }x,0), sendrecvcount\underline{\ }xy, MPI\underline{\ }REAL, \&}\\
     \texttt{\hspace{9.5em}work(1), \hspace{6.5em}sendrecvcount\underline{\ }xy, MPI\underline{\ }REAL, \&}\\
     \texttt{\hspace{9.5em}comm1dy, ierr )}\\
     \ \\
    \item<3-> The data resorting before and after the calls to MPI\_ALLTOALL is highly optimized to account for the different processor architectures.\\
     {\tiny (a stand-alone sketch of \texttt{MPI\_ALLTOALL} follows on the next slide)}
   \end{itemize}
  \end{column}
 \end{columns}
\end{frame}
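% Slide 11a (added example)
\begin{frame}[fragile]
 \frametitle{Added Example: MPI\_ALLTOALL (Sketch)}
 \scriptsize
 A stand-alone sketch, \emph{not} the PALM transposition code, showing the basic behaviour of
 \texttt{MPI\_ALLTOALL}: every PE sends one block of data to each PE and receives one block from each PE.
 In PALM, these blocks are the parts of the 3D-arrays that have to change the PE during a transposition.
 \begin{lstlisting}[basicstyle=\tiny\ttfamily]
PROGRAM alltoall_sketch
   IMPLICIT NONE
   INCLUDE 'mpif.h'                          ! or: USE mpi
   INTEGER ::  i, ierr, myid, npes
   INTEGER, DIMENSION(:), ALLOCATABLE ::  sendbuf, recvbuf

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, npes, ierr )

   ALLOCATE( sendbuf(npes), recvbuf(npes) )
   sendbuf = (/ ( myid * npes + i, i = 1, npes ) /)   ! element i is sent to PE i-1

!-- Every PE sends one element to each PE and receives one element from each PE
   CALL MPI_ALLTOALL( sendbuf, 1, MPI_INTEGER, recvbuf, 1, MPI_INTEGER, &
                      MPI_COMM_WORLD, ierr )

   PRINT*, 'PE ', myid, ' received: ', recvbuf
   CALL MPI_FINALIZE( ierr )
END PROGRAM alltoall_sketch
 \end{lstlisting}
\end{frame}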
% Slide 12
\begin{frame}
 \frametitle{Parallel I/O}
 \scriptsize
 \vspace{-2mm}
 \begin{columns}[T]
  \begin{column}{1.1\textwidth}
   \begin{itemize}
    \item<1-> PALM writes and reads some of the input/output files in parallel, i.e. each processor writes/reads its own file. \textbf{Each file then has a different name!}\\
     \ \\
     \textbf{Example:} binary files for restart are written into a subdirectory of the PALM working directory:\\
     \quad \texttt{BINOUT/\_0000}\\
     \quad \texttt{BINOUT/\_0001}\\
     \quad $\vdots$\\
     {\tiny (a sketch of how such per-PE file names can be composed is given on the last slide)}
    \item<2-> These files can be handled (copied) by \texttt{mrun} using the file attribute \texttt{pe} in the configuration file:\\
     \texttt{BINOUT out:loc:pe restart \~{}/palm/current\underline{\ }version/JOBS/\$fname/RESTART \underline{\ }d3d}\\
     \ \\
     \onslide<3->In this case, filenames are interpreted as directory names. An \texttt{mrun} call using option\\
     ``\texttt{-d example\underline{\ }cbl -r restart}'' will copy the local \textbf{\underline{directory}} \texttt{BINOUT} to the \textbf{\underline{directory}} \texttt{.../RESTART/example\underline{\ }cbl\underline{\ }d3d}.
   \end{itemize}
   \onslide<4->
   \textbf{General comment:}
   \begin{itemize}
    \item Parallel I/O on a large number of files ($>$1000) currently may cause severe file system problems (e.g. on Lustre file systems).\\
     \textbf{Workaround:} reduce the maximum number of parallel I/O streams\\
     \hspace{5.75em}(see \texttt{mrun}-options)
   \end{itemize}
  \end{column}
 \end{columns}
\end{frame}

% Slide 13
\begin{frame}
 \frametitle{PALM Parallel I/O for 2D/3D Data}
 \footnotesize
 \begin{itemize}
  \item<1-> 2D- and 3D-data output is also written in parallel by the processors (2D: by default, 3D: generally).
  \item<2-> Because the graphics software (\texttt{ncview}, \texttt{ncl}, \texttt{ferret}, etc.) expects the data to be in a single file, these output files have to be merged into one file after PALM has finished.\\
   \ \\
   This is done within the job by calling the utility program \texttt{combine\underline{\ }plot\underline{\ }fields.x} after PALM has successfully finished.
  \item<3-> \texttt{combine\underline{\ }plot\underline{\ }fields.x} is automatically executed by \texttt{mrun}.
  \item<4-> The executable \texttt{combine\underline{\ }plot\underline{\ }fields.x} is created during the installation process by the command\\
   \ \\
   \quad \texttt{mbuild -u -h }
 \end{itemize}
\end{frame}

% Slide 14
\begin{frame}
 \frametitle{Performance Examples (I)}
 \begin{itemize}
  \item Simulation using $1536 \times 768 \times 242$ grid points ($\sim$ 60 GByte)
 \end{itemize}
 \includegraphics[scale=0.28]{parallelization_figures/perf_left.png}
 \includegraphics[scale=0.28]{parallelization_figures/perf_right.png}
 \includegraphics[scale=0.28]{parallelization_figures/legende.png} \\
 \scriptsize
 \begin{columns}[T]
  \begin{column}{0.18\textwidth}
  \end{column}
  \begin{column}{0.4\textwidth}
   IBM-Regatta, HLRN, Hannover\\
   (1D domain decomposition)
  \end{column}
  \begin{column}{0.4\textwidth}
   Sun Fire X4600, Tokyo Institute of Technology\\
   (2D domain decomposition)
  \end{column}
  \begin{column}{0.2\textwidth}
  \end{column}
 \end{columns}
\end{frame}

% Slide 15
\begin{frame}
 \frametitle{Performance Examples (II)}
 \begin{itemize}
  \item Simulation with $2048^3$ grid points ($\sim$ 2 TByte memory)
 \end{itemize}
 \begin{columns}[T]
  \begin{column}{0.5\textwidth}
   \includegraphics[scale=0.25]{parallelization_figures/perf_3.png} \\
   \scriptsize
   \quad SGI-ICE2, HLRN-II, Hannover\\
   \quad (2D-domain decomposition)
  \end{column}
  \begin{column}{0.5\textwidth}
   \vspace{35mm}
   \onslide<2-> currently largest simulation feasible on that system:\\
   \ \\
   $4096^3$ grid points
  \end{column}
 \end{columns}
\end{frame}
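% Slide 15a (added example)
\begin{frame}[fragile]
 \frametitle{Added Example: One Restart File per PE (Sketch)}
 \scriptsize
 A supplementary, stand-alone sketch, \emph{not} the PALM I/O code, illustrating the per-PE file names used for
 parallel I/O (see the ``Parallel I/O'' slide). File names and unit numbers are chosen for illustration only; the
 directory \texttt{BINOUT} must already exist.
 \begin{lstlisting}[basicstyle=\tiny\ttfamily]
PROGRAM per_pe_file_sketch
   IMPLICIT NONE
   INCLUDE 'mpif.h'                 ! or: USE mpi
   INTEGER          ::  ierr, myid
   CHARACTER(LEN=4) ::  myid_char

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )

!-- Compose a file name containing the processor id, e.g. BINOUT/_0003
   WRITE( myid_char, '(I4.4)' )  myid
   OPEN( 20, FILE='BINOUT/_'//myid_char, FORM='UNFORMATTED' )
   WRITE( 20 )  myid               ! each PE writes (only) its own data
   CLOSE( 20 )

   CALL MPI_FINALIZE( ierr )
END PROGRAM per_pe_file_sketch
 \end{lstlisting}
\end{frame}

\end{document}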