% $Id: parallelization.tex 945 2012-07-17 15:43:01Z maronga $
\input{header_tmp.tex}
%\input{../header_lectures.tex}

\usepackage[utf8]{inputenc}
\usepackage{ngerman}
\usepackage{pgf}
\usetheme{Dresden}
\usepackage{subfigure}
\usepackage{units}
\usepackage{multimedia}
\usepackage{hyperref}
\newcommand{\event}[1]{\newcommand{\eventname}{#1}}
\usepackage{xmpmulti}
\usepackage{tikz}
\usetikzlibrary{shapes,arrows,positioning}
\usetikzlibrary{decorations.markings}      % new package
\usetikzlibrary{decorations.pathreplacing} % new package
\def\Tiny{\fontsize{4pt}{4pt}\selectfont}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{multicol}
\usepackage{pdfcomment}
\usepackage{graphicx}
\usepackage{listings}
\lstset{showspaces=false,language=fortran,basicstyle=
\ttfamily,showstringspaces=false,captionpos=b}

\institute{Institut für Meteorologie und Klimatologie, Leibniz Universität Hannover}
\date{last update: \today}
\event{PALM Seminar}
\setbeamertemplate{navigation symbols}{}

\setbeamertemplate{footline}
{
\begin{beamercolorbox}[rightskip=-0.1cm]{}
{\includegraphics[height=0.65cm]{imuk_logo.pdf}\hfill \includegraphics[height=0.65cm]{luh_logo.pdf}}
\end{beamercolorbox}
\begin{beamercolorbox}[ht=2.5ex,dp=1.125ex,
leftskip=.3cm,rightskip=0.3cm plus1fil]{title in head/foot}
{\leavevmode{\usebeamerfont{author in head/foot}\insertshortauthor} \hfill \eventname \hfill \insertframenumber \; / \inserttotalframenumber}
\end{beamercolorbox}
\begin{beamercolorbox}[colsep=1.5pt]{lower separation line foot}
\end{beamercolorbox}
}
%\logo{\includegraphics[width=0.3\textwidth]{luhimuk_logo.pdf}}

\title[Parallelization]{Parallelization}
\author{Siegfried Raasch}

\begin{document}

% Slide 1
\begin{frame}
\titlepage
\end{frame}

\section{Parallelization}
\subsection{Parallelization}

% Slide 2
\begin{frame}
\frametitle{Basics of Parallelization}
\tikzstyle{yellow} = [rectangle, fill=yellow!20, text width=0.6\textwidth, font=\scriptsize]
\scriptsize
\textbf{Parallelization:}
\begin{itemize}
\item<2-> All processor elements (PEs, cores) carry out the same program (SIMD).
\item<3-> Each PE of a parallel computer operates on a different set of data.
\end{itemize}

\ \\
\onslide<4->\textbf{Realization:}
\begin{columns}[T]
\begin{column}{0.45\textwidth}
\onslide<5->each PE solves the equations for a different subdomain of the total domain
\begin{center}
\includegraphics[width=0.5\textwidth]{parallelization_figures/subdomain.png}
\end{center}
\onslide<7->each PE only knows the variable values of its subdomain; communication / data exchange between PEs is necessary\\
\onslide<9->\textbf{message passing model (MPI)}
\end{column}
\begin{column}{0.45\textwidth}
\onslide<6->program loops are parallelized, i.e.\ each processor solves for a subset of the total index range
\begin{center}
\begin{tikzpicture}[auto, node distance=0]
\node [yellow] (1) {%
\texttt{!\$OMP DO}\\
\texttt{\quad \quad DO i = 1, 100}\\
\quad \quad \quad $\vdots$\\
\texttt{\quad \quad ENDDO}};
\end{tikzpicture}
\end{center}
\onslide<8-> parallelization can easily be done by the compiler if all PEs have access to all variables (shared memory)\\
\onslide<10-> \textbf{shared memory model (OpenMP)}
\end{column}
\end{columns}
\onslide<10-> \begin{center}\tiny (both models are illustrated by a code sketch on the next slide)\end{center}
\end{frame}
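
% Code sketch: message passing vs. shared memory
\begin{frame}[fragile]
\frametitle{Basics of Parallelization -- Code Sketch}
\scriptsize
A minimal, generic sketch (not PALM code) of the two models from the previous slide: the same loop is distributed once explicitly over MPI processes and once by the compiler via an OpenMP directive. The variable names (\texttt{is}, \texttt{ie}, \texttt{a}) are chosen for illustration only.
\begin{lstlisting}[basicstyle=\tiny\ttfamily]
PROGRAM loop_parallelization
   USE MPI
   IMPLICIT NONE
   INTEGER ::  i, ie, ierr, is, myid, numprocs
   REAL    ::  a(100)

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
!
!-- Message passing model: each PE explicitly works on its own index range
!-- (and on its own copy of the data)
   is = myid * 100 / numprocs + 1
   ie = ( myid + 1 ) * 100 / numprocs
   DO  i = is, ie
      a(i) = REAL( i )
   ENDDO
!
!-- Shared memory model: the compiler/runtime distributes the iterations
!-- among the threads, all of which access the same array a
!$OMP PARALLEL DO
   DO  i = 1, 100
      a(i) = REAL( i )
   ENDDO
!$OMP END PARALLEL DO

   CALL MPI_FINALIZE( ierr )
END PROGRAM loop_parallelization
\end{lstlisting}
\end{frame}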

% Slide 3
\begin{frame}
\frametitle{Basic Architectures of Massively Parallel Computers}
\tikzstyle{info} = [rectangle, text width=0.25\textwidth, font=\scriptsize]


\begin{center}
\begin{tikzpicture}

\node (center) at (0,1) {};
\onslide<2-> \node (Network) at (-3.5,1) [ellipse,fill=green!20] {Network};
\node (dis_mem) at (-3.5,-1) [text width=0.28\textwidth] {\footnotesize \textbf{distributed} memory\\(Cray-T3E)};
\onslide<3-> \node (add_mem) at (3.5,1) [rectangle, draw] {addressable memory};
\node (sha_mem) at (3.5,-1) [text width=0.35\textwidth] {\footnotesize \textbf{shared} memory\\(SGI-Altix, multicore PCs)};
\onslide<7-> \node (MPI) at (-3.5,-3) [ellipse,fill=yellow!90] {MPI};
\onslide<8-> \node (OpenMP) at (3.5,-3) [ellipse,fill=yellow!90] {OpenMP};
\onslide<6-> \node (clustered_systems) at (0,-3) [draw, text width=0.15\textwidth] {clustered systems};
\node (cs_info) at (0,-4.2) [text width=0.4\textwidth] {\footnotesize (IBM-Regatta, Linux-Cluster,
NEC-SX, SGI-ICE, Cray-XT4)};

% Addressable memory node (big)

\onslide<3-> \node (p1) at (2,-0.05) [draw,circle, scale=0.9] {\scriptsize p};
\node (p2) at (2.6,-0.05) [draw,circle, scale=0.9] {\scriptsize p};
\node (p3) at (3.2,-0.05) [draw,circle, scale=0.9] {\scriptsize p};
\node (p4) at (3.8,-0.05) [draw,circle, scale=0.9] {\scriptsize p};
\node (p5) at (4.4,-0.05) [draw,circle, scale=0.9] {\scriptsize p};
\node (p6) at (5,-0.05) [draw,circle, scale=0.9] {\scriptsize p};

\draw[-] (3.5,0.7) -- (3.5,0.4);
\draw[-] (2,0.4) -- (5,0.4);
\draw[-] (2,0.4) -- (p1);
\draw[-] (2.6,0.4) -- (p2);
\draw[-] (3.2,0.4) -- (p3);
\draw[-] (3.8,0.4) -- (p4);
\draw[-] (4.4,0.4) -- (p5);
\draw[-] (5,0.4) -- (p6);

% Addressable memory node (small)
\onslide<4->
\draw[->, thick] (1.5,0.2) -- (0.4,0.2) ;
\node at (0,0.2) [scale=0.2] {%
\begin{tikzpicture}

\node (add_mem_small) at (3.5,0.9) [ultra thick, rectangle, draw, minimum width=3cm] {};
\node (p1_small) at (2,-0.05) [ultra thick, draw,circle, scale=0.9] {};
\node (p2_small) at (2.6,-0.05) [ultra thick, draw,circle, scale=0.9] {};
\node (p3_small) at (3.2,-0.05) [ultra thick, draw,circle, scale=0.9] {};
\node (p4_small) at (3.8,-0.05) [ultra thick, draw,circle, scale=0.9] {};
\node (p5_small) at (4.4,-0.05) [ultra thick, draw,circle, scale=0.9] {};
\node (p6_small) at (5,-0.05) [ultra thick, draw,circle, scale=0.9] {};

\draw[-, ultra thick] (add_mem_small.south) -- (3.5,0.4);
\draw[-, ultra thick] (2,0.4) -- (5,0.4);
\draw[-, ultra thick] (2,0.4) -- (p1_small);
\draw[-, ultra thick] (2.6,0.4) -- (p2_small);
\draw[-, ultra thick] (3.2,0.4) -- (p3_small);
\draw[-, ultra thick] (3.8,0.4) -- (p4_small);
\draw[-, ultra thick] (4.4,0.4) -- (p5_small);
\draw[-, ultra thick] (5,0.4) -- (p6_small);


\end{tikzpicture}
} ;

\onslide<5->
\node (add_info) at (0,-0.1) [scale=0.9] {\scriptsize node};

% Black Arrows
\onslide<6-> \draw[->, thick] (-2.5,-1.5) -- (-0.8,-2.2) ;
\draw[->, thick] (2.5,-1.5) -- (0.8,-2.2) ;

% MPI Arrows
\onslide<7-> \draw[->, ultra thick, color=yellow] (-3.5,-2.7) -- (-3.5,-1.5) ;
\draw[->, ultra thick, color=yellow] (-2.9,-3) -- (-1.0,-3.0) ;
\draw[->, ultra thick, color=yellow] (-3.0,-2.8) -- (1.5,-1.0) ;

% OpenMP Arrows
\onslide<8-> \draw[->, ultra thick, color=yellow] (3.5,-2.6) -- (3.5,-1.5) ;
\draw[->, ultra thick, color=yellow] (2.5,-2.8) -- (-2.5,0.7) ;

% Network decorations
\onslide<2-> \node (pr1) at (-4.6,0.7) [draw,circle,scale=0.5] {};
\node (mem1) at (-4.6,0.45) [draw,rectangle,scale=0.5] {};
\draw[-] (-4.5,0.9) -- (pr1);
\draw[-] (mem1) -- (pr1);
\draw[-] ([xshift=0.02cm]pr1.east) -- ([xshift=0.3cm, yshift=-0.2cm]pr1.east);
\draw[-] ([xshift=0.02cm]mem1.east) -- ([xshift=0.3cm, yshift=-0.2cm]mem1.east);
\node at (-3.7,0.45) {\tiny processor};
\node at (-3.3,0.25) {\tiny addressable memory};

\node (pr2) at (-4.6,1.3) [draw,circle,scale=0.5] {};
\node (mem2) at (-4.6,1.55) [draw,rectangle,scale=0.5] {};
\draw[-] (-4.5,1.1) -- (pr2);
\draw[-] (mem2) -- (pr2);
\node (pr3) at (-3.9,1.5) [draw,circle,scale=0.5] {};
\node (mem3) at (-3.9,1.75) [draw,rectangle,scale=0.5] {};
\draw[-] (-3.8,1.3) -- (pr3);
\draw[-] (mem3) -- (pr3);
\node (pr4) at (-4.9,0.95) [draw,circle,scale=0.5] {};
\node (mem4) at (-4.9,0.7) [draw,rectangle,scale=0.5] {};
\draw[-] (-4.55,1.0) -- (pr4);
\draw[-] (mem4) -- (pr4);
\node (pr5) at (-3.0,1.5) [draw,circle,scale=0.5] {};
\node (mem5) at (-3.0,1.75) [draw,rectangle,scale=0.5] {};
\draw[-] (-3.08,1.3) -- (pr5);
\draw[-] (mem5) -- (pr5);
\node (pr6) at (-2.2,1.1) [draw,circle,scale=0.5] {};
\node (mem6) at (-2.2,1.35) [draw,rectangle,scale=0.5] {};
\draw[-] (-2.45,1.0) -- (pr6);
\draw[-] (mem6) -- (pr6);

\end{tikzpicture}
\end{center}

\end{frame}

% Slide 4
\begin{frame}
\frametitle{PALM Parallelization Model}
\scriptsize
\onslide<2-> \textbf{General demands for a parallelized program:}
\begin{itemize}
\item<3-> Load balancing
\item<4-> Small communication overhead
\item<5-> Scalability (up to large numbers of processors)
\end{itemize}
\vspace{2mm}
\onslide<6-> \textbf{The basic parallelization method used for PALM is a 2D-domain decomposition along $x$ and $y$:}\\
\begin{columns}[T]
\begin{column}{0.3\textwidth}
\onslide<7-> \includegraphics[width=1.0\textwidth]{parallelization_figures/subdomain.png}
\end{column}
\begin{column}{0.6\textwidth}
\ \\
\onslide<8-> contiguous data in memory (FORTRAN):\\
\ \\
\ \\
\textcolor{blue}{columns of i}\\
\textcolor{red}{no contiguous data at all}\\
\onslide<9-> \textcolor{blue}{columns of k}\\
\textcolor{red}{planes of k,j (all data contiguous)}
\end{column}
\end{columns}
\vspace{2mm}
\begin{itemize}
\item<10-> Alternatively, a 1D-decomposition along $x$ or $y$ may be used in case of slow networks, but this generally does not scale for processor numbers $>$ 256.
\item<11-> Message passing is realized using MPI.
\item<12-> OpenMP parallelization, as well as mixed usage of OpenMP and MPI, is also possible (OpenMP tests and optimization are under way).
\end{itemize}
\onslide<12-> {\tiny (a code sketch of the subdomain index bounds is given on the next slide)}
\end{frame}
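
% Code sketch: subdomain index bounds of the 2D decomposition
\begin{frame}[fragile]
\frametitle{2D Domain Decomposition -- Index Bounds Sketch}
\scriptsize
A simplified sketch (not taken from PALM) of how each PE obtains the index bounds of its subdomain from the virtual processor grid. The variable names follow the PALM convention (\texttt{nxl}, \texttt{nxr}, \texttt{nys}, \texttt{nyn}, \texttt{npex}, \texttt{npey}); it is assumed that the grid points divide evenly among the PEs and that \texttt{myid} and the PE coordinates would normally be provided by MPI.
\begin{lstlisting}[basicstyle=\tiny\ttfamily]
PROGRAM domain_decomposition
   IMPLICIT NONE
   INTEGER ::  myid, myidx, myidy    ! PE id and its processor-grid coordinates
   INTEGER ::  npex, npey            ! number of PEs along x and y
   INTEGER ::  nx, ny                ! total domain: i = 0..nx, j = 0..ny
   INTEGER ::  nnx, nny              ! subdomain size
   INTEGER ::  nxl, nxr, nys, nyn    ! subdomain index bounds of this PE

   nx = 1535;  ny = 767
   npex = 16;  npey = 8
   myid = 0                          ! in PALM obtained via MPI_COMM_RANK

   nnx = ( nx + 1 ) / npex           ! grid points per PE along x
   nny = ( ny + 1 ) / npey           ! grid points per PE along y

   myidx = myid / npey               ! position in the processor grid
   myidy = MOD( myid, npey )         ! (row-major ordering as in MPI)

   nxl = myidx * nnx                 ! left, right, south and north index
   nxr = nxl + nnx - 1               ! bounds of this PE's subdomain
   nys = myidy * nny
   nyn = nys + nny - 1

   PRINT*, 'PE', myid, ':  i = ', nxl, '...', nxr, '   j = ', nys, '...', nyn
END PROGRAM domain_decomposition
\end{lstlisting}
\end{frame}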

% Slide 5
\begin{frame}
\frametitle{Implications of Decomposition}
\scriptsize
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\begin{itemize}
\item<2-> Central finite differences cause \textcolor{red}{local data dependencies}\\
\ \\
solution: introduction of \textcolor{red}{ghost points} {\tiny (code sketch on the next slide)}
\vspace{5mm}

\begin{flushright}
\onslide<3-> $\left. \dfrac{\partial \psi}{\partial x} \right|_i = \dfrac{\psi_{i+1} - \psi_{i-1}}{2 \Delta x}$
\end{flushright}
\vspace{10mm}
\item<4-> FFT and linear equation solver cause \textcolor{red}{non-local data dependencies}\\
\ \\
solution: transposition of 3D-arrays
\end{itemize}
\end{column}
\begin{column}{0.6\textwidth}
\begin{center}
\vspace{-5mm}
\onslide<3-> \includegraphics[width=0.7\textwidth]{parallelization_figures/ghost_points.png}
\vspace{4mm}
\onslide<5-> \includegraphics[width=0.8\textwidth]{parallelization_figures/fft.png} \end{center}
\vspace{-4mm}
\textbf{Example: transpositions for solving the Poisson equation}
\end{column}
\end{columns}
\end{frame}
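
% Code sketch: central difference on a subdomain with ghost points
\begin{frame}[fragile]
\frametitle{Ghost Points -- Code Sketch}
\scriptsize
A minimal sketch (not PALM code) of why ghost points remove the local data dependency: with one extra layer on each side of the subdomain, the central difference can be evaluated up to the subdomain boundary without further communication, provided the ghost layers have been filled by an exchange beforehand. All names and values are illustrative only.
\begin{lstlisting}[basicstyle=\tiny\ttfamily]
PROGRAM ghost_point_stencil
   IMPLICIT NONE
   INTEGER, PARAMETER ::  nxl = 0, nxr = 49    ! subdomain of this PE along x
   REAL               ::  psi(nxl-1:nxr+1)     ! one ghost point on each side
   REAL               ::  dpsidx(nxl:nxr)
   REAL               ::  dx = 10.0
   INTEGER            ::  i

!
!-- psi(nxl-1) and psi(nxr+1) are the ghost points; in PALM they hold copies
!-- of the outermost values of the neighbouring subdomains (filled by an MPI
!-- exchange). Here the whole array is simply initialized with dummy values.
   DO  i = nxl-1, nxr+1
      psi(i) = REAL( i )
   ENDDO
!
!-- Thanks to the ghost points, the central difference can be evaluated on
!-- the complete subdomain without any communication inside the loop
   DO  i = nxl, nxr
      dpsidx(i) = ( psi(i+1) - psi(i-1) ) / ( 2.0 * dx )
   ENDDO

   PRINT*, 'maximum gradient: ', MAXVAL( ABS( dpsidx ) )
END PROGRAM ghost_point_stencil
\end{lstlisting}
\end{frame}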

% Slide 6
\begin{frame}
\frametitle{How to Use the Parallelized Version of PALM}
\scriptsize
\begin{columns}[T]
\begin{column}{1.12\textwidth}
\begin{itemize}
\item<1-> The parallel version of PALM is switched on by the \texttt{mrun} option ``\texttt{-K parallel}''. Additionally, the number of required processors and the number of tasks per node (number of PEs to be used on one node) have to be provided:\\
\quad \texttt{mrun ... -K parallel -X64 -T8 ...}
\item<2-> From an accounting point of view, it is always most efficient to use all PEs of a node (\texttt{-T8}) (in case of a ``non-shared'' usage of nodes).
\item<3-> If a normal Unix-kernel operating system (not a micro-kernel) is running on each CPU, the code might speed up if 1--2 fewer PEs than the total number of PEs on the node are used.
\item<4-> On machines with a comparably slow network, a 1D-decomposition (along $x$) should be used, because then only two transpositions have to be carried out by the pressure solver. A 1D-decomposition is automatically used for NEC machines (e.g. \texttt{-h necriam}). The virtual processor grid to be used can be set manually by the d3par parameters \texttt{npex} and \texttt{npey}.
\item<6-> Using the OpenMP parallelization does not yield any advantage over using a pure domain decomposition with MPI (contrary to expectations, it mostly slows down the computational speed), but this may change on cluster systems for very large numbers of processors ($>$10000?).\\
\end{itemize}
\begin{center}
\vspace{-7mm}
\onslide<5-> \includegraphics[width=0.13\textwidth]{parallelization_figures/folie_6.png}
\end{center}
\end{column}
\end{columns}
\end{frame}

% Slide 7
\begin{frame}
\frametitle{MPI Communication}
\scriptsize
\begin{columns}[T]
\begin{column}{1.12\textwidth}
\begin{itemize}
\item<1-> MPI (Message Passing Interface) is a portable interface for communication between PEs (FORTRAN or C library).
\vspace{2mm}
\item<2-> To make MPI available on HLRN's SGI-ICE, the module \texttt{mpt} must be loaded by setting the \texttt{\%modules} option in \texttt{.mrun.config} appropriately:

\quad \texttt{\%modules ...:mpt:...}
\vspace{2mm}
\item<2-> The path to the MPI library may have to be given in the compiler call by setting an appropriate option in the configuration file \texttt{.mrun.config}:

\quad \texttt{\%lopts -axW:-cpp:-r8:-nbs:-Vaxlib:\textcolor{blue}{-L:<replace by mpi library path>:-lmpi}}
\vspace{2mm}
\item<3-> All MPI calls must be within\\
\quad \texttt{CALL MPI\_INIT( ierror )}\\
\quad $\vdots$\\
\quad \texttt{CALL MPI\_FINALIZE( ierror )}\\
\end{itemize}

\end{column}
\end{columns}
\end{frame}

% Slide 8
\begin{frame}
\frametitle{Communication Within PALM}
\small
\begin{itemize}
\item<1-> MPI calls within PALM are available when using the \texttt{mrun} option ``\texttt{-K parallel}''.
\item<2-> Communication is needed for
\begin{itemize}
\footnotesize
\item<2-> exchange of ghost points
\item<3-> transpositions (FFT Poisson solver)
\item<4-> calculating global sums (e.g.\ for horizontal averages; see the code sketch on the next slide)
\end{itemize}
\item<5-> Additional MPI calls are required to define the so-called virtual processor grid and to define special data types needed for a more convenient exchange of data.
\end{itemize}
\end{frame}
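
% Code sketch: global sum via MPI_ALLREDUCE
\begin{frame}[fragile]
\frametitle{Global Sums -- Code Sketch}
\scriptsize
A generic sketch (not PALM code) of a global sum with \texttt{MPI\_ALLREDUCE}, as it is needed e.g.\ for horizontal averages: each PE first sums over its own subdomain, then the partial sums of all PEs are added and the result is returned to every PE. The dummy value used as the local sum is for illustration only.
\begin{lstlisting}[basicstyle=\tiny\ttfamily]
PROGRAM global_sum
   USE MPI
   IMPLICIT NONE
   INTEGER          ::  ierr, myid, numprocs
   DOUBLE PRECISION ::  local_sum, total_sum

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
!
!-- Partial sum over the subdomain of this PE (dummy value here)
   local_sum = DBLE( myid + 1 )
!
!-- Add the partial sums of all PEs; every PE receives the result
   CALL MPI_ALLREDUCE( local_sum, total_sum, 1, MPI_DOUBLE_PRECISION,          &
                       MPI_SUM, MPI_COMM_WORLD, ierr )

   IF ( myid == 0 )  PRINT*, 'global sum = ', total_sum
   CALL MPI_FINALIZE( ierr )
END PROGRAM global_sum
\end{lstlisting}
\end{frame}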

% Slide 9
\begin{frame}
\frametitle{Virtual Processor Grid Used in PALM}
\footnotesize
The processor grid and special data types are defined in file \texttt{init\_pegrid.f90}.
\begin{itemize}
\item<2-> PALM uses a two-dimensional virtual processor grid (in case of a 1D-decomposition, it has only one element along $y$). It is defined by a so-called communicator (here: \texttt{comm2d}):\\
\scriptsize
\quad \texttt{ndim = 2}\\
\quad \texttt{pdims(1) = npex ! \# of processors along x}\\
\quad \texttt{pdims(2) = npey ! \# of processors along y}\\
\quad \texttt{cyclic(1) = .TRUE.}\\
\quad \texttt{cyclic(2) = .TRUE.}\\

\quad \texttt{CALL MPI\underline{\ }CART\underline{\ }CREATE( MPI\underline{\ }COMM\underline{\ }WORLD, ndim, pdims, cyclic, \&}\\
\quad \texttt{\hspace{10.5em} reorder, \textcolor{blue}{comm2d}, ierr )}
\item<3-> The processor number (id) with respect to this processor grid, \texttt{myid}, is given by:\\
\scriptsize
\quad \texttt{CALL MPI\underline{\ }COMM\underline{\ }RANK( comm2d, \textcolor{blue}{myid}, ierr )}
\item<4-> The ids of the neighbouring PEs are determined by:\\
\scriptsize
\quad \texttt{CALL MPI\underline{\ }CART\underline{\ }SHIFT( comm2d, 0, 1, \textcolor{blue}{pleft}, \textcolor{blue}{pright}, ierr )}\\
\quad \texttt{CALL MPI\underline{\ }CART\underline{\ }SHIFT( comm2d, 1, 1, \textcolor{blue}{psouth}, \textcolor{blue}{pnorth}, ierr )}
\end{itemize}
\onslide<4-> {\tiny (a complete, compilable sketch of these calls is given on the next slide)}
\end{frame}
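
% Code sketch: setting up a 2D virtual processor grid
\begin{frame}[fragile]
\frametitle{Virtual Processor Grid -- Code Sketch}
\scriptsize
A self-contained sketch combining the calls from the previous slide (not the PALM routine itself): unlike PALM, where \texttt{npex} and \texttt{npey} can be prescribed, this sketch lets \texttt{MPI\_DIMS\_CREATE} choose a suitable factorization of the number of PEs.
\begin{lstlisting}[basicstyle=\tiny\ttfamily]
PROGRAM virtual_processor_grid
   USE MPI
   IMPLICIT NONE
   INTEGER ::  comm2d, ierr, myid, ndim, numprocs
   INTEGER ::  pdims(2)
   INTEGER ::  pleft, pright, psouth, pnorth
   LOGICAL ::  cyclic(2), reorder

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )
!
!-- Two-dimensional, cyclic processor grid; MPI_DIMS_CREATE factorizes
!-- the total number of PEs (in PALM, pdims is set from npex and npey)
   ndim    = 2
   pdims   = 0
   CALL MPI_DIMS_CREATE( numprocs, ndim, pdims, ierr )
   cyclic  = .TRUE.
   reorder = .TRUE.
   CALL MPI_CART_CREATE( MPI_COMM_WORLD, ndim, pdims, cyclic, reorder,         &
                         comm2d, ierr )
!
!-- Rank with respect to the new communicator and ids of the neighbouring PEs
   CALL MPI_COMM_RANK( comm2d, myid, ierr )
   CALL MPI_CART_SHIFT( comm2d, 0, 1, pleft,  pright, ierr )
   CALL MPI_CART_SHIFT( comm2d, 1, 1, psouth, pnorth, ierr )

   PRINT*, 'PE', myid, ' neighbours (l,r,s,n): ', pleft, pright, psouth, pnorth
   CALL MPI_FINALIZE( ierr )
END PROGRAM virtual_processor_grid
\end{lstlisting}
\end{frame}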

% Slide 10
\begin{frame}
\frametitle{Exchange of Ghost Points}
\scriptsize
\begin{itemize}
\item<1-> Ghost points are stored in additional array elements added at the horizontal boundaries of the subdomains, e.g.\\
\tiny
\quad \texttt{u(:,:,nxl\textcolor{blue}{-ngl}), u(:,:,nxr\textcolor{blue}{+ngl}) ! left and right boundary}\\
\quad \texttt{u(:,nys\textcolor{blue}{-ngl},:), u(:,nyn\textcolor{blue}{+ngl},:) ! south and north boundary}\\
\hspace{3mm}
\item<2-> \scriptsize The exchange of ghost points is done in file \texttt{exchange\underline{\ }horiz.f90}.\\
\textbf{\underline{Simplified} example:} synchronous exchange of ghost points along $x$ ($yz$-planes, send left, receive right plane):\\
\tiny
\quad \texttt{CALL MPI\underline{\ }SENDRECV( ar(nzb,nys-\textcolor{blue}{ngl},nxl), ngp\underline{\ }yz, MPI\underline{\ }REAL, pleft, 0,}\\
\quad \texttt{\hspace{9.5em}ar(nzb,nys-\textcolor{blue}{ngl},nxr+1), ngp\underline{\ }yz, MPI\underline{\ }REAL, pright, 0,}\\
\quad \texttt{\hspace{9.5em}comm2d, status, ierr )}\\
\hspace{3mm}
\item<3-> \scriptsize In the real code, special MPI data types (vectors) are defined for the exchange of $yz$/$xz$-planes, for performance reasons and because the array elements to be exchanged are not stored consecutively in memory for $xz$-planes:\\
\tiny
\quad \texttt{ngp\underline{\ }yz(0) = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * \textcolor{blue}{ngl} )}\\
\quad \texttt{CALL MPI\underline{\ }TYPE\underline{\ }VECTOR( \textcolor{blue}{ngl}, ngp\underline{\ }yz(0), ngp\underline{\ }yz(0), MPI\underline{\ }REAL, type\underline{\ }yz(0), ierr )}\\
\quad \texttt{CALL MPI\underline{\ }TYPE\underline{\ }COMMIT( type\underline{\ }yz(0), ierr ) ! see file init\underline{\ }pegrid.f90}\\
\ \\
\quad \texttt{CALL MPI\underline{\ }SENDRECV( ar(nzb,nys-ngl,nxl), type\underline{\ }yz(grid\underline{\ }level), MPI\underline{\ }REAL, pleft, 0, ...}\\
\end{itemize}
\onslide<2-> {\scriptsize (a self-contained version of the simplified exchange is given on the next slide)}
\end{frame}
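
% Code sketch: synchronous exchange of ghost points
\begin{frame}[fragile]
\frametitle{Exchange of Ghost Points -- Code Sketch}
\scriptsize
A self-contained sketch of the simplified exchange from the previous slide (not the PALM routine): a 1D decomposition along $x$ with one ghost layer (\texttt{ngl = 1}) and without the derived data types. The variable names mirror the slide; the array bounds are arbitrary example values.
\begin{lstlisting}[basicstyle=\tiny\ttfamily]
PROGRAM exchange_ghost_points
   USE MPI
   IMPLICIT NONE
   INTEGER, PARAMETER ::  nzb = 0, nzt = 10, nys = 0, nyn = 7,                 &
                          nxl = 0, nxr = 7, ngl = 1
   REAL    ::  ar(nzb:nzt+1,nys-ngl:nyn+ngl,nxl-ngl:nxr+ngl)
   INTEGER ::  comm1dx, dims(1), ierr, myid, ngp_yz, numprocs, pleft, pright
   INTEGER ::  status(MPI_STATUS_SIZE)
   LOGICAL ::  periods(1)

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )
!
!-- Cyclic 1D processor grid along x with neighbours pleft and pright
   dims(1) = numprocs;  periods(1) = .TRUE.
   CALL MPI_CART_CREATE( MPI_COMM_WORLD, 1, dims, periods, .TRUE.,             &
                         comm1dx, ierr )
   CALL MPI_COMM_RANK( comm1dx, myid, ierr )
   CALL MPI_CART_SHIFT( comm1dx, 0, 1, pleft, pright, ierr )

   ar = REAL( myid )
!
!-- Number of elements of one yz-plane (including the ghost points along y)
   ngp_yz = ( nzt - nzb + 2 ) * ( nyn - nys + 1 + 2 * ngl )
!
!-- Send own left boundary plane to the left neighbour, receive the right
!-- ghost plane from the right neighbour
   CALL MPI_SENDRECV( ar(nzb,nys-ngl,nxl),   ngp_yz, MPI_REAL, pleft,  0,      &
                      ar(nzb,nys-ngl,nxr+1), ngp_yz, MPI_REAL, pright, 0,      &
                      comm1dx, status, ierr )
!
!-- Send own right boundary plane to the right neighbour, receive the left
!-- ghost plane from the left neighbour
   CALL MPI_SENDRECV( ar(nzb,nys-ngl,nxr),   ngp_yz, MPI_REAL, pright, 1,      &
                      ar(nzb,nys-ngl,nxl-1), ngp_yz, MPI_REAL, pleft,  1,      &
                      comm1dx, status, ierr )

   IF ( myid == 0 )  PRINT*, 'left ghost plane now holds ', ar(nzb,nys,nxl-1)
   CALL MPI_FINALIZE( ierr )
END PROGRAM exchange_ghost_points
\end{lstlisting}
\end{frame}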

% Slide 11
\begin{frame}
\frametitle{Transpositions}
\footnotesize
\begin{columns}[T]
\begin{column}{1.05\textwidth}
\begin{itemize}
\item<1-> Transpositions can be found in file \texttt{transpose.f90} (several subroutines for 1D- or 2D-decompositions; they are called mainly from the FFT pressure solver, see \texttt{poisfft.f90}).\\
\ \\
\item<2-> The following example is for a transposition from $x$ to $y$, i.e.\ for the input array all data elements along $x$ reside on the same PE, while after the transposition all elements along $y$ are on the same PE (a self-contained sketch of the call follows on the next slide):\\
\ \\
\scriptsize
\texttt{!}\\
\texttt{!-- in SUBROUTINE transpose\underline{\ }xy:}\\
\texttt{CALL MPI\underline{\ }ALLTOALL( f\underline{\ }inv(nys\underline{\ }x,nzb\underline{\ }x,0), sendrecvcount\underline{\ }xy, MPI\underline{\ }REAL, \&}\\
\texttt{\hspace{9.5em}work(1), \hspace{6.5em}sendrecvcount\underline{\ }xy, MPI\underline{\ }REAL, \&}\\
\texttt{\hspace{9.5em}comm1dy, ierr )}\\
\ \\
\item<3-> The data resorting before and after the calls of MPI\_ALLTOALL is highly optimized to account for the different processor architectures.
\end{itemize}
\end{column}
\end{columns}
\end{frame}
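
% Code sketch: data redistribution with MPI_ALLTOALL
\begin{frame}[fragile]
\frametitle{Transpositions -- Code Sketch}
\scriptsize
A generic, self-contained sketch of the \texttt{MPI\_ALLTOALL} semantics used by the transpositions (not PALM's \texttt{transpose\_xy}): block $k$ of the re-sorted send buffer is delivered to PE $k$, and block $k$ of the receive buffer holds the data received from PE $k$. The names \texttt{f\_inv}, \texttt{work} and \texttt{sendrecvcount} mirror the previous slide; the block size is arbitrary.
\begin{lstlisting}[basicstyle=\tiny\ttfamily]
PROGRAM alltoall_transposition
   USE MPI
   IMPLICIT NONE
   INTEGER, PARAMETER ::  nblock = 2           ! elements sent to each PE
   REAL, ALLOCATABLE  ::  f_inv(:), work(:)
   INTEGER ::  ierr, myid, numprocs, sendrecvcount

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )

   ALLOCATE( f_inv(nblock*numprocs), work(nblock*numprocs) )
!
!-- Block k of f_inv (re-sorted input data) is destined for PE k; after the
!-- call, block k of work contains the data received from PE k, i.e. the
!-- direction along which complete data reside on one PE has changed
   f_inv = REAL( myid )
   sendrecvcount = nblock
   CALL MPI_ALLTOALL( f_inv, sendrecvcount, MPI_REAL,                          &
                      work,  sendrecvcount, MPI_REAL,                          &
                      MPI_COMM_WORLD, ierr )

   PRINT*, 'PE', myid, ' received: ', work
   DEALLOCATE( f_inv, work )
   CALL MPI_FINALIZE( ierr )
END PROGRAM alltoall_transposition
\end{lstlisting}
\end{frame}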

% Slide 12
\begin{frame}
\frametitle{Parallel I/O}
\scriptsize
\begin{columns}[T]
\begin{column}{1.1\textwidth}
\begin{itemize}
\item<1-> PALM writes and reads some of the input/output files in parallel, i.e.\ each processor writes/reads its own file. \textbf{Each file then has a different name!} (see the code sketch on the next slide)\\
\ \\
\textbf{Example:} binary files for restart are written into a subdirectory of the PALM working directory:\\
\quad \texttt{BINOUT/\_0000}\\
\quad \texttt{BINOUT/\_0001}\\
\quad $\vdots$
\item<2-> These files can be handled (copied) by \texttt{mrun} using the file attribute \texttt{pe} in the configuration file:\\
\texttt{BINOUT out:loc:pe restart \~{}/palm/current\underline{\ }version/JOBS/\$fname/RESTART \underline{\ }d3d}\\
\ \\
\onslide<3->In this case, filenames are interpreted as directory names.
An \texttt{mrun} call using option\\
``\texttt{-d example\underline{\ }cbl -r restart}'' will copy the local \textbf{\underline{directory}} \texttt{BINOUT} to the \textbf{\underline{directory}} \texttt{.../RESTART/example\underline{\ }cbl\underline{\ }d3d}.
\end{itemize}
\onslide<4-> \textbf{General comment:}
\begin{itemize}
\item Parallel I/O on a large number of files ($>$1000) may currently cause severe file system problems (e.g. on Lustre file systems). A workaround for this problem will\\ be available soon.
\end{itemize}
\end{column}
\end{columns}
\end{frame}
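
% Code sketch: PE-dependent file names for parallel I/O
\begin{frame}[fragile]
\frametitle{Parallel I/O -- Code Sketch}
\scriptsize
A minimal sketch (not the PALM I/O code) of the naming scheme: each PE derives its own file name from its processor id and writes its data into that file, yielding \texttt{BINOUT/\_0000}, \texttt{BINOUT/\_0001}, \dots\ It is assumed that the directory \texttt{BINOUT} already exists.
\begin{lstlisting}[basicstyle=\tiny\ttfamily]
PROGRAM parallel_io
   USE MPI
   IMPLICIT NONE
   CHARACTER(LEN=20) ::  filename
   INTEGER           ::  ierr, myid

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
!
!-- Each PE builds its own file name from its processor id ...
   WRITE( filename, '(A,I4.4)' )  'BINOUT/_', myid
!
!-- ... and writes its own data (in PALM: the restart data of its subdomain)
   OPEN( 20, FILE=TRIM( filename ), FORM='UNFORMATTED' )
   WRITE( 20 )  myid
   CLOSE( 20 )

   CALL MPI_FINALIZE( ierr )
END PROGRAM parallel_io
\end{lstlisting}
\end{frame}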



% Slide 13
\begin{frame}
\frametitle{PALM Parallel I/O for 2D/3D Data}
\footnotesize
\begin{itemize}
\item<1-> 2D- and 3D-data output is also written in parallel by the processors (2D: by default, 3D: generally).
\item<2-> Because the graphics software (\texttt{ncview}, \texttt{ncl}, \texttt{ferret}, etc.) expects the data to be in one file, these output files have to be merged into one single file after PALM has finished.\\
\ \\
This is done within the job by calling the utility program \texttt{combine\underline{\ }plot\underline{\ }fields.x} after PALM has successfully finished.
\item<3-> \texttt{combine\underline{\ }plot\underline{\ }fields.x} is automatically executed by \texttt{mrun}.
\item<4-> The executable \texttt{combine\underline{\ }plot\underline{\ }fields.x} is created during the installation process by the command\\
\ \\
\quad \texttt{mbuild -u -h <host identifier>}
\end{itemize}
\end{frame}

% Slide 14
\begin{frame}
\frametitle{Performance Examples (I)}
\begin{itemize}
\item Simulation using $1536 \times 768 \times 242$ grid points ($\sim$ 60 GByte)
\end{itemize}
\includegraphics[scale=0.28]{parallelization_figures/perf_left.png}
\includegraphics[scale=0.28]{parallelization_figures/perf_right.png}
\includegraphics[scale=0.28]{parallelization_figures/legende.png} \\
\scriptsize
\begin{columns}[T]
\begin{column}{0.18\textwidth}
\end{column}
\begin{column}{0.4\textwidth}
IBM-Regatta, HLRN, Hannover\\
(1D domain decomposition)
\end{column}
\begin{column}{0.4\textwidth}
Sun Fire X4600, Tokyo Institute of Technology\\
(2D domain decomposition)
\end{column}
\begin{column}{0.2\textwidth}
\end{column}

\end{columns}

\end{frame}

% Slide 15
\begin{frame}
\frametitle{Performance Examples (II)}
\begin{itemize}
\item Simulation with $2048^3$ grid points ($\sim$ 2 TByte memory)
\end{itemize}
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\includegraphics[scale=0.25]{parallelization_figures/perf_3.png} \\
\scriptsize
\quad SGI-ICE2, HLRN-II, Hannover\\
\quad (2D-domain decomposition)
\end{column}
\begin{column}{0.5\textwidth}
\vspace{35mm}
\onslide<2-> largest simulation currently feasible on that system:\\
\ \\
$4096^3$ grid points
\end{column}
\end{columns}
\end{frame}

\end{document}