Changeset 1764 for palm/trunk/SOURCE/pmc_interface.f90
- Timestamp: Feb 28, 2016 12:45:19 PM
- File: 1 edited
palm/trunk/SOURCE/pmc_interface.f90
r1763 r1764 20 20 ! Current revisions: 21 21 ! ------------------ 22 ! 22 ! +cpl_parent_id, 23 ! cpp-statements for nesting replaced by __parallel statements, 24 ! errors output with message-subroutine, 25 ! index bugfixes in pmci_interp_tril_all, 26 ! some adjustments to PALM style 23 27 ! 24 28 ! Former revisions: … … 35 39 !------------------------------------------------------------------------------! 36 40 37 38 USE mpi 39 40 ! 41 !-- PALM modules 41 USE arrays_3d, & 42 ONLY: dzu, dzw, e, e_p, pt, pt_p, q, q_p, te_m, tu_m, tv_m, tw_m, u, & 43 u_p, v, v_p, w, w_p, zu, zw, z0 44 45 USE control_parameters, & 46 ONLY: dt_3d, dz, humidity, message_string, nest_bound_l, & 47 nest_bound_r, nest_bound_s, nest_bound_n, passive_scalar, & 48 simulated_time, topography, volume_flow 49 50 USE cpulog, & 51 ONLY: cpu_log, log_point_s 52 53 USE grid_variables, & 54 ONLY: dx, dy 55 56 USE indices, & 57 ONLY: nbgp, nx, nxl, nxlg, nxlu, nxr, nxrg, ny, nyn, nyng, nys, nysg, & 58 nysv, nz, nzb, nzb_s_inner, nzb_u_inner, nzb_u_outer, & 59 nzb_v_inner, nzb_v_outer, nzb_w_inner, nzb_w_outer, nzt 60 42 61 USE kinds 43 USE pegrid, & 44 ONLY: myid, numprocs, comm2d, comm1dx, comm1dy, myidx, myidy, collective_wait 45 USE arrays_3d, & 46 ONLY: u, v, w, e, pt, q, u_p, v_p, w_p, e_p, pt_p, q_p, z0, dzu, dzw, zu, zw, & 47 tu_m, tv_m, tw_m, te_m 48 USE indices, & 49 ONLY: nx, ny, nz, nxl, nxr, nys, nyn, nzb, nzt, nxlu, nysv, nxlg, nxrg, & 50 nysg, nyng, nbgp, nzb_u_inner, nzb_v_inner, nzb_w_inner, & 51 nzb_s_inner, nzb_u_outer, nzb_v_outer, nzb_w_outer 52 USE control_parameters, & 53 ONLY: dz, dt_3d, simulated_time, message_string, volume_flow, & 54 nest_bound_l, nest_bound_r, nest_bound_s, nest_bound_n, & 55 topography, humidity, passive_scalar 56 USE grid_variables, & 57 ONLY: dx, dy 58 USE cpulog, & 59 ONLY: cpu_log, log_point_s 60 61 ! 
62 !-- PMC modules 63 USE pmc_general, & 64 ONLY: pmc_status_ok, pmc_max_modell, da_namelen 65 USE pmc_handle_communicator, & 66 ONLY: pmc_init_model, pmc_is_rootmodel, pmc_get_local_model_info, & 67 pmc_server_for_client 68 USE pmc_mpi_Wrapper, & 69 ONLY: pmc_recv_from_client, pmc_send_to_server, pmc_recv_from_server, & 70 pmc_send_to_client, pmc_bcast 71 USE pmc_server, & 72 ONLY: pmc_serverinit, pmc_s_getnextarray, & 73 pmc_s_set_dataarray, pmc_s_setind_and_allocmem, & 74 pmc_s_set_2d_index_list, pmc_s_fillbuffer,pmc_s_getdata_from_buffer 75 USE pmc_client, & 76 ONLY: pmc_clientinit, pmc_set_dataarray_name, pmc_c_get_2d_index_list, & 77 pmc_c_getnextarray, pmc_c_set_dataarray, pmc_c_setind_and_allocmem, & 78 pmc_c_putbuffer, pmc_c_getbuffer 62 63 #if defined( __parallel ) 64 #if defined( __lc ) 65 USE MPI 66 #else 67 INCLUDE "mpif.h" 68 #endif 69 70 USE pegrid, & 71 ONLY: collective_wait, comm1dx, comm1dy, comm2d, myid, myidx, myidy, & 72 numprocs 73 74 USE pmc_client, & 75 ONLY: pmc_clientinit, pmc_c_getnextarray, pmc_c_get_2d_index_list, & 76 pmc_c_getbuffer, pmc_c_putbuffer, pmc_c_setind_and_allocmem, & 77 pmc_c_set_dataarray, pmc_set_dataarray_name 78 79 USE pmc_general, & 80 ONLY: da_namelen, pmc_max_modell, pmc_status_ok 81 82 USE pmc_handle_communicator, & 83 ONLY: pmc_get_local_model_info, pmc_init_model, pmc_is_rootmodel, & 84 pmc_no_namelist_found, pmc_server_for_client 85 86 USE pmc_mpi_wrapper, & 87 ONLY: pmc_bcast, pmc_recv_from_client, pmc_recv_from_server, & 88 pmc_send_to_client, pmc_send_to_server 89 90 USE pmc_server, & 91 ONLY: pmc_serverinit, pmc_s_fillbuffer, pmc_s_getdata_from_buffer, & 92 pmc_s_getnextarray, pmc_s_setind_and_allocmem, & 93 pmc_s_set_dataarray, pmc_s_set_2d_index_list 94 95 #endif 79 96 80 97 IMPLICIT NONE 81 98 99 !-- TO_DO: a lot of lines (including comments) in this file exceed the 80 char 100 !-- limit. Try to reduce as much as possible 101 102 !-- TO_DO: shouldn't we use public as default here? Only a minority of the 103 !-- variables is private. 82 104 PRIVATE !: Note that the default publicity is here set to private. 83 105 84 106 ! 85 107 !-- Constants 86 INTEGER(iwp), PARAMETER, PUBLIC 87 INTEGER(iwp), PARAMETER, PUBLIC 108 INTEGER(iwp), PARAMETER, PUBLIC :: client_to_server = 2 !: 109 INTEGER(iwp), PARAMETER, PUBLIC :: server_to_client = 1 !: 88 110 89 111 ! 90 112 !-- Coupler setup 91 INTEGER(iwp), PUBLIC, SAVE :: cpl_id !: 92 CHARACTER(LEN=32), PUBLIC, SAVE :: cpl_name !: 93 INTEGER(iwp), PUBLIC, SAVE :: cpl_npex !: 94 INTEGER(iwp), PUBLIC, SAVE :: cpl_npey !: 113 INTEGER(iwp), PUBLIC, SAVE :: cpl_id = 1 !: 114 CHARACTER(LEN=32), PUBLIC, SAVE :: cpl_name !: 115 INTEGER(iwp), PUBLIC, SAVE :: cpl_npex !: 116 INTEGER(iwp), PUBLIC, SAVE :: cpl_npey !: 117 INTEGER(iwp), PUBLIC, SAVE :: cpl_parent_id !: 95 118 96 119 ! 97 120 !-- Control parameters, will be made input parameters later 98 CHARACTER(LEN=7), PUBLIC, SAVE :: nesting_mode = 'two-way' !: 99 REAL(wp), PUBLIC, SAVE :: anterp_relax_length_l = -1.0_wp !: 100 REAL(wp), PUBLIC, SAVE :: anterp_relax_length_r = -1.0_wp !: 101 REAL(wp), PUBLIC, SAVE :: anterp_relax_length_s = -1.0_wp !: 102 REAL(wp), PUBLIC, SAVE :: anterp_relax_length_n = -1.0_wp !: 103 REAL(wp), PUBLIC, SAVE :: anterp_relax_length_t = -1.0_wp !: 121 CHARACTER(LEN=7), PUBLIC, SAVE :: nesting_mode = 'two-way' !: steering parameter for one- or two-way nesting 122 123 LOGICAL, PUBLIC, SAVE :: nested_run = .FALSE. 
!: general switch if nested run or not 124 125 REAL(wp), PUBLIC, SAVE :: anterp_relax_length_l = -1.0_wp !: 126 REAL(wp), PUBLIC, SAVE :: anterp_relax_length_r = -1.0_wp !: 127 REAL(wp), PUBLIC, SAVE :: anterp_relax_length_s = -1.0_wp !: 128 REAL(wp), PUBLIC, SAVE :: anterp_relax_length_n = -1.0_wp !: 129 REAL(wp), PUBLIC, SAVE :: anterp_relax_length_t = -1.0_wp !: 104 130 105 131 ! … … 120 146 REAL(wp), SAVE, DIMENSION(:,:,:), ALLOCATABLE, TARGET, PUBLIC :: qc !: 121 147 122 INTEGER(iwp), DIMENSION(5) :: coarse_bound !: Moved here form map_fine_to_coarse.148 INTEGER(iwp), DIMENSION(5) :: coarse_bound !: 123 149 REAL(wp), PUBLIC, SAVE :: xexl !: 124 150 REAL(wp), PUBLIC, SAVE :: xexr !: … … 229 255 INTEGER(iwp) :: nx 230 256 INTEGER(iwp) :: ny 231 INTEGER (iwp):: nz257 INTEGER(iwp) :: nz 232 258 REAL(wp) :: dx 233 259 REAL(wp) :: dy … … 247 273 TYPE(coarsegrid_def), SAVE :: cg !: 248 274 249 ! 250 !-- Interface section. 275 276 INTERFACE pmci_client_datatrans 277 MODULE PROCEDURE pmci_client_datatrans 278 END INTERFACE 279 280 INTERFACE pmci_client_initialize 281 MODULE PROCEDURE pmci_client_initialize 282 END INTERFACE 283 284 INTERFACE pmci_client_synchronize 285 MODULE PROCEDURE pmci_client_synchronize 286 END INTERFACE 287 288 INTERFACE pmci_ensure_nest_mass_conservation 289 MODULE PROCEDURE pmci_ensure_nest_mass_conservation 290 END INTERFACE 291 251 292 INTERFACE pmci_init 252 293 MODULE PROCEDURE pmci_init 253 294 END INTERFACE 254 295 255 296 INTERFACE pmci_modelconfiguration 256 297 MODULE PROCEDURE pmci_modelconfiguration 257 298 END INTERFACE 258 299 300 INTERFACE pmci_server_initialize 301 MODULE PROCEDURE pmci_server_initialize 302 END INTERFACE 303 259 304 INTERFACE pmci_server_synchronize 260 305 MODULE PROCEDURE pmci_server_synchronize 261 306 END INTERFACE 262 263 INTERFACE pmci_client_synchronize 264 MODULE PROCEDURE pmci_client_synchronize 265 END INTERFACE 266 267 INTERFACE pmci_server_datatrans 268 MODULE PROCEDURE pmci_server_datatrans 269 END INTERFACE 270 271 INTERFACE pmci_client_datatrans 272 MODULE PROCEDURE pmci_client_datatrans 273 END INTERFACE 274 307 275 308 INTERFACE pmci_update_new 276 309 MODULE PROCEDURE pmci_update_new 277 310 END INTERFACE 278 311 279 INTERFACE pmci_ensure_nest_mass_conservation 280 MODULE PROCEDURE pmci_ensure_nest_mass_conservation 281 END INTERFACE 282 283 INTERFACE pmci_server_initialize 284 MODULE PROCEDURE pmci_server_initialize 285 END INTERFACE 286 287 INTERFACE pmci_client_initialize 288 MODULE PROCEDURE pmci_client_initialize 289 END INTERFACE 290 312 PUBLIC pmci_client_datatrans 313 PUBLIC pmci_client_initialize 314 PUBLIC pmci_client_synchronize 315 PUBLIC pmci_ensure_nest_mass_conservation 291 316 PUBLIC pmci_init 292 317 PUBLIC pmci_modelconfiguration 318 PUBLIC pmci_server_datatrans 319 PUBLIC pmci_server_initialize 293 320 PUBLIC pmci_server_synchronize 294 PUBLIC pmci_client_synchronize295 PUBLIC pmci_server_datatrans296 PUBLIC pmci_client_datatrans297 321 PUBLIC pmci_update_new 298 PUBLIC pmci_ensure_nest_mass_conservation299 PUBLIC pmci_server_initialize300 PUBLIC pmci_client_initialize301 322 302 323 … … 305 326 306 327 SUBROUTINE pmci_init( world_comm ) 328 307 329 IMPLICIT NONE 308 330 309 INTEGER, INTENT(OUT) :: world_comm !: 310 311 INTEGER(iwp) :: ierr !: 312 INTEGER(iwp) :: istat !: 313 INTEGER(iwp) :: PMC_status !: 314 315 316 #if defined PMC_ACTIVE 317 CALL pmc_init_model( world_comm, pmc_status ) 318 IF ( pmc_status /= pmc_status_ok ) THEN 319 CALL MPI_ABORT( MPI_COMM_WORLD, istat, ierr ) 331 INTEGER, 
INTENT(OUT) :: world_comm !: 332 333 #if defined( __parallel ) 334 335 INTEGER(iwp) :: ierr !: 336 INTEGER(iwp) :: istat !: 337 INTEGER(iwp) :: pmc_status !: 338 339 340 CALL pmc_init_model( world_comm, nesting_mode, pmc_status ) 341 342 IF ( pmc_status == pmc_no_namelist_found ) THEN 343 ! 344 !-- This is not a nested run 345 ! 346 !-- TO_DO: this wouldn't be required any more? 347 world_comm = MPI_COMM_WORLD 348 cpl_id = 1 349 cpl_name = "" 350 cpl_npex = 2 351 cpl_npey = 2 352 lower_left_coord_x = 0.0_wp 353 lower_left_coord_y = 0.0_wp 354 RETURN 355 ELSE 356 ! 357 !-- Set the general steering switch which tells PALM that its a nested run 358 nested_run = .TRUE. 320 359 ENDIF 321 CALL pmc_get_local_model_info( my_cpl_id = cpl_id, cpl_name = cpl_name, npe_x=cpl_npex, npe_y = cpl_npey, & 322 lower_left_x = lower_left_coord_x, lower_left_y = lower_left_coord_y ) 360 361 CALL pmc_get_local_model_info( my_cpl_id = cpl_id, & 362 my_cpl_parent_id = cpl_parent_id, & 363 cpl_name = cpl_name, & 364 npe_x = cpl_npex, npe_y = cpl_npey, & 365 lower_left_x = lower_left_coord_x, & 366 lower_left_y = lower_left_coord_y ) 367 ! 368 !-- Message that communicators for nesting are initialized. 369 !-- Attention: myid has been set at the end of pmc_init_model in order to 370 !-- guarantee that only PE0 of the root domain does the output. 371 CALL location_message( 'finished', .TRUE. ) 372 ! 373 !-- Reset myid to its default value 374 myid = 0 323 375 #else 324 world_comm = MPI_COMM_WORLD 376 ! 377 !-- Nesting cannot be used in serial mode. cpl_id is set to root domain (1) 378 !-- because no location messages would be generated otherwise. 379 !-- world_comm is given a dummy value to avoid compiler warnings (INTENT(OUT) 380 !-- should get an explicit value) 325 381 cpl_id = 1 326 cpl_name = "" 327 cpl_npex = 2 328 cpl_npey = 2 329 lower_left_coord_x = 0.0_wp 330 lower_left_coord_y = 0.0_wp 382 nested_run = .FALSE. 383 world_comm = 1 331 384 #endif 332 385 … … 336 389 337 390 SUBROUTINE pmci_modelconfiguration 391 338 392 IMPLICIT NONE 339 393 394 CALL location_message( 'setup the nested model configuration', .FALSE. ) 340 395 CALL pmci_setup_coordinates !: Compute absolute coordinates valid for all models 341 CALL pmci_setup_client !: Initialize PMC Client (Must be called before pmc_ palm_SetUp_Server)396 CALL pmci_setup_client !: Initialize PMC Client (Must be called before pmc_setup_server) 342 397 CALL pmci_setup_server !: Initialize PMC Server 398 CALL location_message( 'finished', .TRUE. ) 343 399 344 400 END SUBROUTINE pmci_modelconfiguration … … 347 403 348 404 SUBROUTINE pmci_setup_server 405 406 #if defined( __parallel ) 349 407 IMPLICIT NONE 350 408 … … 371 429 372 430 373 #if defined PMC_ACTIVE 374 CALL pmc_serverinit ! Initialize PMC Server 375 376 ! 377 !-- Get coordinates from all Clients. 431 ! 432 ! Initialize the PMC server 433 CALL pmc_serverinit 434 435 ! 436 !-- Get coordinates from all clients 378 437 DO m = 1, SIZE( pmc_server_for_client ) - 1 379 438 client_id = pmc_server_for_client(m) … … 391 450 392 451 ! 
393 !-- Find the highest client level in the coarse grid for the reduced z transfer 452 !-- Find the highest client level in the coarse grid for the reduced z 453 !-- transfer 394 454 DO k = 1, nz 395 455 IF ( zw(k) > fval(1) ) THEN … … 404 464 ALLOCATE( cl_coord_y(-nbgp:ny_cl+nbgp) ) 405 465 406 CALL pmc_recv_from_client( client_id, cl_coord_x, SIZE( cl_coord_x ), 0, 11, ierr ) 407 CALL pmc_recv_from_client( client_id, cl_coord_y, SIZE( cl_coord_y ), 0, 12, ierr ) 466 CALL pmc_recv_from_client( client_id, cl_coord_x, SIZE( cl_coord_x ),& 467 0, 11, ierr ) 468 CALL pmc_recv_from_client( client_id, cl_coord_y, SIZE( cl_coord_y ),& 469 0, 12, ierr ) 408 470 WRITE ( 0, * ) 'receive from pmc Client ', client_id, nx_cl, ny_cl 409 471 410 472 define_coarse_grid_real(1) = lower_left_coord_x 411 473 define_coarse_grid_real(2) = lower_left_coord_y 412 define_coarse_grid_real(3) = 0 ! KK currently not used. 474 !-- TO_DO: remove this? 475 define_coarse_grid_real(3) = 0 ! KK currently not used. 413 476 define_coarse_grid_real(4) = 0 414 477 define_coarse_grid_real(5) = dx 415 478 define_coarse_grid_real(6) = dy 416 define_coarse_grid_real(7) = lower_left_coord_x + ( nx + 1 ) * dx ! AH: corrected 6.2.2015417 define_coarse_grid_real(8) = lower_left_coord_y + ( ny + 1 ) * dy ! AH: corrected 6.2.2015418 define_coarse_grid_real(9) = dz ! AH: added 24.2.2015479 define_coarse_grid_real(7) = lower_left_coord_x + ( nx + 1 ) * dx 480 define_coarse_grid_real(8) = lower_left_coord_y + ( ny + 1 ) * dy 481 define_coarse_grid_real(9) = dz 419 482 420 483 define_coarse_grid_int(1) = nx … … 437 500 ! 438 501 !-- Send coarse grid information to client 439 CALL pmc_send_to_client( client_id, Define_coarse_grid_real, 9, 0, 21, ierr ) 440 CALL pmc_send_to_client( client_id, Define_coarse_grid_int, 3, 0, 22, ierr ) 441 442 ! 443 !-- Send local grid to client. 502 CALL pmc_send_to_client( client_id, Define_coarse_grid_real, 9, 0, & 503 21, ierr ) 504 CALL pmc_send_to_client( client_id, Define_coarse_grid_int, 3, 0, & 505 22, ierr ) 506 507 ! 508 !-- Send local grid to client 444 509 CALL pmc_send_to_client( client_id, coord_x, nx+1+2*nbgp, 0, 24, ierr ) 445 510 CALL pmc_send_to_client( client_id, coord_y, ny+1+2*nbgp, 0, 25, ierr ) 446 511 447 512 ! 
448 !-- Also send the dzu-, dzw-, zu- and zw-arrays here .513 !-- Also send the dzu-, dzw-, zu- and zw-arrays here 449 514 CALL pmc_send_to_client( client_id, dzu, nz_cl + 1, 0, 26, ierr ) 450 515 CALL pmc_send_to_client( client_id, dzw, nz_cl + 1, 0, 27, ierr ) … … 454 519 ENDIF 455 520 456 CALL MPI_B cast( nomatch, 1, MPI_INTEGER, 0, comm2d, ierr )521 CALL MPI_BCAST( nomatch, 1, MPI_INTEGER, 0, comm2d, ierr ) 457 522 IF ( nomatch /= 0 ) THEN 458 WRITE ( message_string, * ) 'Error: nested client domain does not fit', &459 ' into its server domain'523 WRITE ( message_string, * ) 'Error: nested client domain does ', & 524 'not fit into its server domain' 460 525 CALL message( 'pmc_palm_setup_server', 'PA0XYZ', 1, 2, 0, 6, 0 ) 461 526 ENDIF 462 527 463 CALL MPI_B cast( nz_cl, 1, MPI_INTEGER, 0, comm2d, ierr )528 CALL MPI_BCAST( nz_cl, 1, MPI_INTEGER, 0, comm2d, ierr ) 464 529 465 530 CALL pmci_create_index_list … … 468 533 !-- Include couple arrays into server content 469 534 DO WHILE ( pmc_s_getnextarray( client_id, myname ) ) 470 CALL pmci_set_array_pointer( myName, client_id = client_id, nz_cl = nz_cl ) 535 CALL pmci_set_array_pointer( myname, client_id = client_id, & 536 nz_cl = nz_cl ) 471 537 ENDDO 472 538 CALL pmc_s_setind_and_allocmem( client_id ) 473 539 ENDDO 474 540 475 #endif476 477 478 541 CONTAINS 479 542 480 543 481 544 SUBROUTINE pmci_create_index_list 545 482 546 IMPLICIT NONE 547 483 548 INTEGER(iwp), DIMENSION(:,:), ALLOCATABLE :: coarse_bound_all !: 484 549 INTEGER(iwp) :: i !: … … 504 569 CALL pmc_recv_from_client( client_id, size_of_array, 2, 0, 40, ierr ) 505 570 ALLOCATE( coarse_bound_all(size_of_array(1),size_of_array(2)) ) 506 CALL pmc_recv_from_client( client_id, coarse_bound_all, SIZE( coarse_bound_all ), 0, 41, ierr ) 571 CALL pmc_recv_from_client( client_id, coarse_bound_all, & 572 SIZE( coarse_bound_all ), 0, 41, ierr ) 507 573 508 574 ! … … 519 585 ALLOCATE( index_list(6,ic) ) 520 586 521 CALL MPI_C omm_size( comm1dx, npx, ierr )522 CALL MPI_C omm_size( comm1dy, npy, ierr )587 CALL MPI_COMM_SIZE( comm1dx, npx, ierr ) 588 CALL MPI_COMM_SIZE( comm1dy, npy, ierr ) 523 589 524 590 nrx = nxr - nxl + 1 ! +1 in index because FORTRAN indexing starts with 1, palm with 0 … … 532 598 scoord(1) = px 533 599 scoord(2) = py 534 CALL MPI_C art_rank( comm2d, scoord, server_pe, ierr )600 CALL MPI_CART_RANK( comm2d, scoord, server_pe, ierr ) 535 601 536 602 ic = ic + 1 … … 546 612 CALL pmc_s_set_2d_index_list( client_id, index_list(:,1:ic) ) 547 613 ELSE 548 ALLOCATE( index_list(6,1) ) 614 ALLOCATE( index_list(6,1) ) ! Dummy allocate 549 615 CALL pmc_s_set_2d_index_list( client_id, index_list ) 550 616 ENDIF … … 554 620 END SUBROUTINE pmci_create_index_list 555 621 556 622 #endif 557 623 END SUBROUTINE pmci_setup_server 558 624 … … 560 626 561 627 SUBROUTINE pmci_setup_client 628 629 #if defined( __parallel ) 562 630 IMPLICIT NONE 631 632 CHARACTER(LEN=DA_Namelen) :: myname !: 633 563 634 INTEGER(iwp) :: i !: 564 635 INTEGER(iwp) :: ierr !: … … 579 650 REAL(wp), DIMENSION(4) :: ztt !: 580 651 581 CHARACTER(LEN=DA_Namelen) :: myname !: 582 583 584 #if defined PMC_ACTIVE 585 IF ( .not. pmc_is_rootmodel() ) THEN ! Root Model does not have Server and is not a client 652 653 !-- TO_DO: describe what is happening in this if-clause 654 !-- Root Model does not have Server and is not a client 655 IF ( .NOT. pmc_is_rootmodel() ) THEN 586 656 CALL pmc_clientinit 587 657 … … 596 666 597 667 ! 
598 !-- Update this list appropritely and also in create_client_arrays and in pmci_set_array_pointer. 599 !-- If a variable is removed, it only has tobe removed from here. 600 CALL pmc_set_dataarray_name( lastentry = .true. ) 601 602 ! 603 !-- Send grid to Server 668 !-- Update this list appropritely and also in create_client_arrays and in 669 !-- pmci_set_array_pointer. 670 !-- If a variable is removed, it only has to be removed from here. 671 CALL pmc_set_dataarray_name( lastentry = .TRUE. ) 672 673 ! 674 !-- Send grid to server 604 675 val(1) = nx 605 676 val(2) = ny … … 621 692 622 693 ! 623 !-- Receive also the dz-,zu- and zw-arrays here. 694 !-- Receive also the dz-,zu- and zw-arrays here. 695 !-- TO_DO: what is the meaning of above comment + remove write statements 696 !-- and give this informations in header 624 697 WRITE(0,*) 'Coarse grid from Server ' 625 698 WRITE(0,*) 'startx_tot = ',define_coarse_grid_real(1) … … 635 708 ENDIF 636 709 637 CALL MPI_B cast( define_coarse_grid_real, 9, MPI_REAL, 0, comm2d, ierr )638 CALL MPI_B cast( define_coarse_grid_int,3, MPI_INTEGER, 0, comm2d, ierr )710 CALL MPI_BCAST( define_coarse_grid_real, 9, MPI_REAL, 0, comm2d, ierr ) 711 CALL MPI_BCAST( define_coarse_grid_int, 3, MPI_INTEGER, 0, comm2d, ierr ) 639 712 640 713 cg%dx = define_coarse_grid_real(5) … … 658 731 !-- Get coarse grid coordinates and vales of the z-direction from server 659 732 IF ( myid == 0) THEN 660 CALL pmc_recv_from_server( cg%coord_x, cg%nx + 1 + 2 * nbgp, 0, 24, ierr ) 661 CALL pmc_recv_from_server( cg%coord_y, cg%ny + 1 + 2 * nbgp, 0, 25, ierr ) 733 CALL pmc_recv_from_server( cg%coord_x, cg%nx + 1 + 2 * nbgp, 0, 24, & 734 ierr ) 735 CALL pmc_recv_from_server( cg%coord_y, cg%ny + 1 + 2 * nbgp, 0, 25, & 736 ierr ) 662 737 CALL pmc_recv_from_server( cg%dzu, cg%nz + 1, 0, 26, ierr ) 663 738 CALL pmc_recv_from_server( cg%dzw, cg%nz + 1, 0, 27, ierr ) … … 668 743 ! 669 744 !-- and broadcast this information 670 CALL MPI_Bcast( cg%coord_x, cg%nx + 1 + 2 * nbgp, MPI_REAL, 0, comm2d, ierr ) 671 CALL MPI_Bcast( cg%coord_y, cg%ny + 1 + 2 * nbgp, MPI_REAL, 0, comm2d, ierr ) 672 CALL MPI_Bcast( cg%dzu, cg%nz + 1, MPI_REAL, 0, comm2d, ierr ) 673 CALL MPI_Bcast( cg%dzw, cg%nz + 1, MPI_REAL, 0, comm2d, ierr ) 674 CALL MPI_Bcast( cg%zu, cg%nz + 2, MPI_REAL, 0, comm2d, ierr ) 675 CALL MPI_Bcast( cg%zw, cg%nz + 2, MPI_REAL, 0, comm2d, ierr ) 745 CALL MPI_BCAST( cg%coord_x, cg%nx + 1 + 2 * nbgp, MPI_REAL, 0, comm2d, & 746 ierr ) 747 CALL MPI_BCAST( cg%coord_y, cg%ny + 1 + 2 * nbgp, MPI_REAL, 0, comm2d, & 748 ierr ) 749 CALL MPI_BCAST( cg%dzu, cg%nz + 1, MPI_REAL, 0, comm2d, ierr ) 750 CALL MPI_BCAST( cg%dzw, cg%nz + 1, MPI_REAL, 0, comm2d, ierr ) 751 CALL MPI_BCAST( cg%zu, cg%nz + 2, MPI_REAL, 0, comm2d, ierr ) 752 CALL MPI_BCAST( cg%zw, cg%nz + 2, MPI_REAL, 0, comm2d, ierr ) 676 753 677 754 CALL pmci_map_fine_to_coarse_grid 678 679 755 CALL pmc_c_get_2d_index_list 680 756 … … 682 758 !-- Include couple arrays into client content. 683 759 DO WHILE ( pmc_c_getnextarray( myname ) ) 684 CALL pmci_create_client_arrays ( myName, icl, icr, jcs, jcn, cg%nz ) ! Klaus, why the c-arrays are still up to cg%nz?? 760 !-- TO_DO: Klaus, why the c-arrays are still up to cg%nz?? 761 CALL pmci_create_client_arrays ( myname, icl, icr, jcs, jcn, cg%nz ) 685 762 ENDDO 686 763 CALL pmc_c_setind_and_allocmem 687 764 688 765 ! 
689 !-- Precompute interpolation coefficients and client-array indices .766 !-- Precompute interpolation coefficients and client-array indices 690 767 CALL pmci_init_interp_tril 691 768 … … 695 772 696 773 ! 697 !-- Define the SGS-TKE scaling factor based on the grid-spacing ratio .774 !-- Define the SGS-TKE scaling factor based on the grid-spacing ratio 698 775 CALL pmci_init_tkefactor 699 776 700 777 ! 701 778 !-- Two-way coupling 702 IF ( nesting_mode == 'two-way' ) THEN779 IF ( nesting_mode == 'two-way' ) THEN 703 780 CALL pmci_init_anterp_tophat 704 781 ENDIF … … 712 789 713 790 ! 714 !-- Why not just simply? test this!791 !-- TO_DO: Why not just simply? test this! 715 792 !area_t_l = ( nx + 1 ) * (ny + 1 ) * dx * dy 716 793 717 ENDIF ! IF ( .not. PMC_is_RootModel ) 718 #endif 719 794 ENDIF 720 795 721 796 CONTAINS … … 723 798 724 799 SUBROUTINE pmci_map_fine_to_coarse_grid 800 725 801 IMPLICIT NONE 802 726 803 INTEGER(iwp), DIMENSION(5,numprocs) :: coarse_bound_all !: 727 804 INTEGER(iwp), DIMENSION(2) :: size_of_array !: … … 730 807 REAL(wp) :: coarse_dy !: 731 808 REAL(wp) :: loffset !: 809 REAL(wp) :: noffset !: 732 810 REAL(wp) :: roffset !: 733 REAL(wp) :: noffset !:734 811 REAL(wp) :: soffset !: 735 812 … … 792 869 ! 793 870 !-- Note that MPI_Gather receives data from all processes in the rank order. 794 CALL MPI_G ather( coarse_bound, 5, MPI_INTEGER, coarse_bound_all, 5, &871 CALL MPI_GATHER( coarse_bound, 5, MPI_INTEGER, coarse_bound_all, 5, & 795 872 MPI_INTEGER, 0, comm2d, ierr ) 796 873 … … 968 1045 DO i = nxl - 1, nxl 969 1046 DO j = nys, nyn 970 nzt_topo_nestbc_l = MAX( nzt_topo_nestbc_l, nzb_u_inner(j,i), nzb_v_inner(j,i), nzb_w_inner(j,i) ) 1047 nzt_topo_nestbc_l = MAX( nzt_topo_nestbc_l, nzb_u_inner(j,i), & 1048 nzb_v_inner(j,i), nzb_w_inner(j,i) ) 971 1049 ENDDO 972 1050 ENDDO … … 978 1056 i = nxr + 1 979 1057 DO j = nys, nyn 980 nzt_topo_nestbc_r = MAX( nzt_topo_nestbc_r, nzb_u_inner(j,i), nzb_v_inner(j,i), nzb_w_inner(j,i) ) 1058 nzt_topo_nestbc_r = MAX( nzt_topo_nestbc_r, nzb_u_inner(j,i), & 1059 nzb_v_inner(j,i), nzb_w_inner(j,i) ) 981 1060 ENDDO 982 1061 nzt_topo_nestbc_r = nzt_topo_nestbc_r + 1 … … 987 1066 DO j = nys - 1, nys 988 1067 DO i = nxl, nxr 989 nzt_topo_nestbc_s = MAX( nzt_topo_nestbc_s, nzb_u_inner(j,i), nzb_v_inner(j,i), nzb_w_inner(j,i) ) 1068 nzt_topo_nestbc_s = MAX( nzt_topo_nestbc_s, nzb_u_inner(j,i), & 1069 nzb_v_inner(j,i), nzb_w_inner(j,i) ) 990 1070 ENDDO 991 1071 ENDDO … … 997 1077 j = nyn + 1 998 1078 DO i = nxl, nxr 999 nzt_topo_nestbc_n = MAX( nzt_topo_nestbc_n, nzb_u_inner(j,i), nzb_v_inner(j,i), nzb_w_inner(j,i) ) 1079 nzt_topo_nestbc_n = MAX( nzt_topo_nestbc_n, nzb_u_inner(j,i), & 1080 nzb_v_inner(j,i), nzb_w_inner(j,i) ) 1000 1081 ENDDO 1001 1082 nzt_topo_nestbc_n = nzt_topo_nestbc_n + 1 … … 1003 1084 1004 1085 ! 1005 !-- Then determine the maximum number of near-wall nodes per wall point based on the grid-spacing ratios. 1006 nzt_topo_max = MAX( nzt_topo_nestbc_l, nzt_topo_nestbc_r, nzt_topo_nestbc_s, nzt_topo_nestbc_n ) 1007 ni = CEILING( cg%dx / dx ) / 2 ! Note that the outer division must be integer division. 1008 nj = CEILING( cg%dy / dy ) / 2 ! Note that the outer division must be integer division. 1086 !-- Then determine the maximum number of near-wall nodes per wall point based 1087 !-- on the grid-spacing ratios. 1088 nzt_topo_max = MAX( nzt_topo_nestbc_l, nzt_topo_nestbc_r, & 1089 nzt_topo_nestbc_s, nzt_topo_nestbc_n ) 1090 1091 ! 1092 !-- Note that the outer division must be integer division. 
1093 ni = CEILING( cg%dx / dx ) / 2 1094 nj = CEILING( cg%dy / dy ) / 2 1009 1095 nk = 1 1010 1096 DO k = 1, nzt_topo_max … … 1018 1104 1019 1105 ! 1020 !-- First horizontal walls .1021 !-- Left boundary .1106 !-- First horizontal walls 1107 !-- Left boundary 1022 1108 IF ( nest_bound_l ) THEN 1023 1109 ALLOCATE( logc_u_l(nzb:nzt_topo_nestbc_l, nys:nyn, 1:2) ) … … 1033 1119 1034 1120 DO j = nys, nyn 1035 1036 ! 1037 !-- Left boundary for u. 1121 ! 1122 !-- Left boundary for u 1038 1123 i = 0 1039 1124 kb = nzb_u_inner(j,i) 1040 1125 k = kb + 1 1041 1126 wall_index = kb 1042 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, wall_index, z0(j,i), kb, direction, ncorr ) 1127 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, & 1128 wall_index, z0(j,i), kb, direction, ncorr ) 1043 1129 logc_u_l(k,j,1) = lc 1044 1130 logc_ratio_u_l(k,j,1,0:ncorr-1) = lcr(0:ncorr-1) … … 1046 1132 1047 1133 ! 1048 !-- Left boundary for v .1134 !-- Left boundary for v 1049 1135 i = -1 1050 1136 kb = nzb_v_inner(j,i) 1051 1137 k = kb + 1 1052 1138 wall_index = kb 1053 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, wall_index, z0(j,i), kb, direction, ncorr ) 1139 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, & 1140 wall_index, z0(j,i), kb, direction, ncorr ) 1054 1141 logc_v_l(k,j,1) = lc 1055 1142 logc_ratio_v_l(k,j,1,0:ncorr-1) = lcr(0:ncorr-1) 1056 1143 lcr(0:ncorr-1) = 1.0_wp 1057 ENDDO 1058 ENDIF 1059 1060 ! 1061 !-- Right boundary. 1144 1145 ENDDO 1146 ENDIF 1147 1148 ! 1149 !-- Right boundary 1062 1150 IF ( nest_bound_r ) THEN 1063 1151 ALLOCATE( logc_u_r(nzb:nzt_topo_nestbc_r,nys:nyn,1:2) ) … … 1071 1159 direction = 1 1072 1160 inc = 1 1073 DO j = nys, nyn 1074 1161 DO j = nys, nyn 1075 1162 ! 1076 1163 !-- Right boundary for u. … … 1079 1166 k = kb + 1 1080 1167 wall_index = kb 1081 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, wall_index, z0(j,i), kb, direction, ncorr ) 1168 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, & 1169 wall_index, z0(j,i), kb, direction, ncorr ) 1082 1170 logc_u_r(k,j,1) = lc 1083 1171 logc_ratio_u_r(k,j,1,0:ncorr-1) = lcr(0:ncorr-1) … … 1090 1178 k = kb + 1 1091 1179 wall_index = kb 1092 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, wall_index, z0(j,i), kb, direction, ncorr ) 1180 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, & 1181 wall_index, z0(j,i), kb, direction, ncorr ) 1093 1182 logc_v_r(k,j,1) = lc 1094 1183 logc_ratio_v_r(k,j,1,0:ncorr-1) = lcr(0:ncorr-1) 1095 1184 lcr(0:ncorr-1) = 1.0_wp 1096 ENDDO 1097 ENDIF 1098 1099 ! 1100 !-- South boundary. 1185 1186 ENDDO 1187 ENDIF 1188 1189 ! 1190 !-- South boundary 1101 1191 IF ( nest_bound_s ) THEN 1102 1192 ALLOCATE( logc_u_s(nzb:nzt_topo_nestbc_s,nxl:nxr,1:2) ) … … 1111 1201 inc = 1 1112 1202 DO i = nxl, nxr 1113 1114 1203 ! 1115 1204 !-- South boundary for u. … … 1118 1207 k = kb + 1 1119 1208 wall_index = kb 1120 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, wall_index, z0(j,i), kb, direction, ncorr ) 1209 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, & 1210 wall_index, z0(j,i), kb, direction, ncorr ) 1121 1211 logc_u_s(k,i,1) = lc 1122 1212 logc_ratio_u_s(k,i,1,0:ncorr-1) = lcr(0:ncorr-1) … … 1124 1214 1125 1215 ! 
1126 !-- South boundary for v .1216 !-- South boundary for v 1127 1217 j = 0 1128 1218 kb = nzb_v_inner(j,i) 1129 1219 k = kb + 1 1130 1220 wall_index = kb 1131 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, wall_index, z0(j,i), kb, direction, ncorr ) 1221 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, & 1222 wall_index, z0(j,i), kb, direction, ncorr ) 1132 1223 logc_v_s(k,i,1) = lc 1133 1224 logc_ratio_v_s(k,i,1,0:ncorr-1) = lcr(0:ncorr-1) … … 1137 1228 1138 1229 ! 1139 !-- North boundary .1230 !-- North boundary 1140 1231 IF ( nest_bound_n ) THEN 1141 1232 ALLOCATE( logc_u_n(nzb:nzt_topo_nestbc_n,nxl:nxr,1:2) ) … … 1149 1240 direction = 1 1150 1241 inc = 1 1151 DO i = nxl, nxr 1152 1242 DO i = nxl, nxr 1153 1243 ! 1154 1244 !-- North boundary for u. … … 1157 1247 k = kb + 1 1158 1248 wall_index = kb 1159 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, wall_index, z0(j,i), kb, direction, ncorr ) 1249 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, & 1250 wall_index, z0(j,i), kb, direction, ncorr ) 1160 1251 logc_u_n(k,i,1) = lc 1161 1252 logc_ratio_u_n(k,i,1,0:ncorr-1) = lcr(0:ncorr-1) … … 1168 1259 k = kb + 1 1169 1260 wall_index = kb 1170 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, wall_index, z0(j,i), kb, direction, ncorr ) 1261 CALL pmci_define_loglaw_correction_parameters( lc, lcr, k, j, inc, & 1262 wall_index, z0(j,i), kb, direction, ncorr ) 1171 1263 logc_v_n(k,i,1) = lc 1172 1264 logc_ratio_v_n(k,i,1,0:ncorr-1) = lcr(0:ncorr-1) 1173 1265 lcr(0:ncorr-1) = 1.0_wp 1266 1174 1267 ENDDO 1175 1268 ENDIF … … 2011 2104 END SUBROUTINE pmci_init_tkefactor 2012 2105 2013 2106 #endif 2014 2107 END SUBROUTINE pmci_setup_client 2015 2108 … … 2017 2110 2018 2111 SUBROUTINE pmci_setup_coordinates 2112 2113 #if defined( __parallel ) 2019 2114 IMPLICIT NONE 2115 2020 2116 INTEGER(iwp) :: i !: 2021 2117 INTEGER(iwp) :: j !: … … 2033 2129 coord_y(j) = lower_left_coord_y + j * dy 2034 2130 ENDDO 2035 2131 2132 #endif 2036 2133 END SUBROUTINE pmci_setup_coordinates 2037 2134 … … 2040 2137 SUBROUTINE pmci_server_synchronize 2041 2138 2139 #if defined( __parallel ) 2042 2140 ! 2043 2141 !-- Unify the time steps for each model and synchronize. … … 2066 2164 ! 2067 2165 !-- Broadcast the unified time step to all server processes. 2068 CALL MPI_B cast( dt_3d, 1, MPI_REAL, 0, comm2d, ierr )2166 CALL MPI_BCAST( dt_3d, 1, MPI_REAL, 0, comm2d, ierr ) 2069 2167 2070 2168 ! … … 2076 2174 ENDIF 2077 2175 ENDDO 2078 2176 2177 #endif 2079 2178 END SUBROUTINE pmci_server_synchronize 2080 2179 … … 2083 2182 SUBROUTINE pmci_client_synchronize 2084 2183 2184 #if defined( __parallel ) 2085 2185 ! 2086 2186 !-- Unify the time steps for each model and synchronize. … … 2105 2205 ! 2106 2206 !-- Broadcast the unified time step to all server processes. 2107 CALL MPI_B cast( dt_3d, 1, MPI_REAL, 0, comm2d, ierr )2207 CALL MPI_BCAST( dt_3d, 1, MPI_REAL, 0, comm2d, ierr ) 2108 2208 ENDIF 2109 2209 2210 #endif 2110 2211 END SUBROUTINE pmci_client_synchronize 2111 2212 … … 2113 2214 2114 2215 SUBROUTINE pmci_server_datatrans( direction ) 2216 2115 2217 IMPLICIT NONE 2218 2116 2219 INTEGER(iwp),INTENT(IN) :: direction !: 2220 2221 #if defined( __parallel ) 2117 2222 INTEGER(iwp) :: client_id !: 2118 2223 INTEGER(iwp) :: i !: … … 2138 2243 ! 2139 2244 !-- Broadcast the unified time step to all server processes. 
2140 CALL MPI_B cast( dt_3d, 1, MPI_REAL, 0, comm2d, ierr )2245 CALL MPI_BCAST( dt_3d, 1, MPI_REAL, 0, comm2d, ierr ) 2141 2246 2142 2247 DO m = 1, SIZE( PMC_Server_for_Client ) - 1 … … 2185 2290 ENDDO 2186 2291 2292 #endif 2187 2293 END SUBROUTINE pmci_server_datatrans 2188 2294 … … 2190 2296 2191 2297 SUBROUTINE pmci_client_datatrans( direction ) 2298 2192 2299 IMPLICIT NONE 2300 2193 2301 INTEGER(iwp), INTENT(IN) :: direction !: 2302 2303 #if defined( __parallel ) 2194 2304 INTEGER(iwp) :: ierr !: 2195 2305 INTEGER(iwp) :: icl !: … … 2214 2324 ! 2215 2325 !-- Broadcast the unified time step to all server processes. 2216 CALL MPI_B cast( dt_3d, 1, MPI_REAL, 0, comm2d, ierr )2326 CALL MPI_BCAST( dt_3d, 1, MPI_REAL, 0, comm2d, ierr ) 2217 2327 CALL cpu_log( log_point_s(70), 'PMC model sync', 'stop' ) 2218 2328 … … 3179 3289 !-- Spatial under-relaxation. 3180 3290 fra = frax(l) * fray(m) * fraz(n) 3291 !-- TO_DO: why not KIND=wp ? 3181 3292 fc(n,m,l) = ( 1.0_wp - fra ) * fc(n,m,l) + fra * cellsum / REAL( nfc, KIND=KIND(cellsum) ) 3182 3293 ENDDO … … 3186 3297 END SUBROUTINE pmci_anterp_tophat 3187 3298 3188 3299 #endif 3189 3300 END SUBROUTINE pmci_client_datatrans 3190 3301 … … 3193 3304 SUBROUTINE pmci_update_new 3194 3305 3306 #if defined( __parallel ) 3195 3307 ! 3196 3308 !-- Copy the interpolated/anterpolated boundary values to the _p … … 3219 3331 3220 3332 ! 3221 !-- Find out later if nesting would work without __nopointer. 3333 !-- TO_DO: Find out later if nesting would work without __nopointer. 3334 #endif 3222 3335 3223 3336 END SUBROUTINE pmci_update_new … … 3226 3339 3227 3340 SUBROUTINE pmci_set_array_pointer( name, client_id, nz_cl ) 3341 3228 3342 IMPLICIT NONE 3229 3343 … … 3232 3346 CHARACTER(LEN=*), INTENT(IN) :: name !: 3233 3347 3348 #if defined( __parallel ) 3234 3349 REAL(wp), POINTER, DIMENSION(:,:) :: p_2d !: 3235 3350 REAL(wp), POINTER, DIMENSION(:,:,:) :: p_3d !: … … 3238 3353 3239 3354 3240 #if defined PMC_ACTIVE3241 3355 NULLIFY( p_3d ) 3242 3356 NULLIFY( p_2d ) … … 3257 3371 CALL pmc_s_set_dataarray( client_id, p_2d ) 3258 3372 ELSE 3259 IF ( myid == 0 ) WRITE( 0, * ) 'PMC set_array_Pointer -> no pointer p_2d or p_3d associated ' 3260 CALL MPI_Abort( MPI_COMM_WORLD, istat, ierr ) 3373 ! 3374 !-- Give only one message for the root domain 3375 IF ( myid == 0 .AND. cpl_id == 1 ) THEN 3376 3377 message_string = 'pointer for array "' // TRIM( name ) // & 3378 '" can''t be associated' 3379 CALL message( 'pmci_set_array_pointer', 'PA0117', 3, 2, 0, 6, 0 ) 3380 ELSE 3381 ! 3382 !-- Avoid others to continue 3383 CALL MPI_BARRIER( comm2d, ierr ) 3384 ENDIF 3261 3385 ENDIF 3262 3386 3263 3387 #endif 3264 3265 3388 END SUBROUTINE pmci_set_array_pointer 3266 3389 … … 3268 3391 3269 3392 SUBROUTINE pmci_create_client_arrays( name, is, ie, js, je, nzc ) 3393 3270 3394 IMPLICIT NONE 3395 3271 3396 INTEGER(iwp), INTENT(IN) :: ie !: 3272 3397 INTEGER(iwp), INTENT(IN) :: is !: … … 3276 3401 CHARACTER(LEN=*), INTENT(IN) :: name !: 3277 3402 3403 #if defined( __parallel ) 3278 3404 REAL(wp), POINTER,DIMENSION(:,:) :: p_2d !: 3279 3405 REAL(wp), POINTER,DIMENSION(:,:,:) :: p_3d !: … … 3282 3408 3283 3409 3284 #if defined PMC_ACTIVE3285 3410 NULLIFY( p_3d ) 3286 3411 NULLIFY( p_2d ) … … 3317 3442 CALL pmc_c_set_dataarray( p_2d ) 3318 3443 ELSE 3319 IF ( myid == 0 ) WRITE( 0 , * ) 'PMC create_client_arrays -> no pointer p_2d or p_3d associated ' 3320 CALL MPI_Abort( MPI_COMM_WORLD, istat, ierr ) 3444 ! 
3445 !-- Give only one message for the first client domain 3446 IF ( myid == 0 .AND. cpl_id == 2 ) THEN 3447 3448 message_string = 'pointer for array "' // TRIM( name ) // & 3449 '" can''t be associated' 3450 CALL message( 'pmci_create_client_arrays', 'PA0170', 3, 2, 0, 6, 0 ) 3451 ELSE 3452 ! 3453 !-- Avoid others to continue 3454 CALL MPI_BARRIER( comm2d, ierr ) 3455 ENDIF 3321 3456 ENDIF 3457 3322 3458 #endif 3323 3324 3459 END SUBROUTINE pmci_create_client_arrays 3325 3460 … … 3327 3462 3328 3463 SUBROUTINE pmci_server_initialize 3464 3465 #if defined( __parallel ) 3329 3466 IMPLICIT NONE 3467 3330 3468 INTEGER(iwp) :: client_id !: 3331 3469 INTEGER(iwp) :: m !: … … 3338 3476 ENDDO 3339 3477 3478 #endif 3340 3479 END SUBROUTINE pmci_server_initialize 3341 3480 … … 3344 3483 SUBROUTINE pmci_client_initialize 3345 3484 3485 #if defined( __parallel ) 3346 3486 IMPLICIT NONE 3487 3347 3488 INTEGER(iwp) :: i !: 3348 3489 INTEGER(iwp) :: icl !: … … 3428 3569 INTEGER(iwp) :: i !: 3429 3570 INTEGER(iwp) :: ib !: 3571 INTEGER(iwp) :: ie !: 3430 3572 INTEGER(iwp) :: j !: 3431 3573 INTEGER(iwp) :: jb !: 3574 INTEGER(iwp) :: je !: 3432 3575 INTEGER(iwp) :: k !: 3433 3576 INTEGER(iwp) :: k1 !: … … 3448 3591 3449 3592 ib = nxl 3450 jb = nys 3593 ie = nxr 3594 jb = nys 3595 je = nyn 3451 3596 IF ( nest_bound_l ) THEN 3597 ib = nxl - 1 3452 3598 IF ( var == 'u' ) THEN ! For u, nxl is a ghost node, but not for the other variables. 3453 ib = nxl + 13599 ib = nxl 3454 3600 ENDIF 3455 3601 ENDIF 3456 3602 IF ( nest_bound_s ) THEN 3603 jb = nys - 1 3457 3604 IF ( var == 'v' ) THEN ! For v, nys is a ghost node, but not for the other variables. 3458 jb = nys + 13605 jb = nys 3459 3606 ENDIF 3460 3607 ENDIF 3608 IF ( nest_bound_r ) THEN 3609 ie = nxr + 1 3610 ENDIF 3611 IF ( nest_bound_n ) THEN 3612 je = nyn + 1 3613 ENDIF 3461 3614 3462 3615 ! 3463 3616 !-- Trilinear interpolation. 3464 DO i = ib, nxr3465 DO j = jb, nyn3466 DO k = kb(j,i), nzt 3617 DO i = ib, ie 3618 DO j = jb, je 3619 DO k = kb(j,i), nzt + 1 3467 3620 l = ic(i) 3468 3621 m = jc(j) … … 3517 3670 END SUBROUTINE pmci_interp_tril_all 3518 3671 3672 #endif 3519 3673 END SUBROUTINE pmci_client_initialize 3520 3674 … … 3523 3677 SUBROUTINE pmci_ensure_nest_mass_conservation 3524 3678 3679 #if defined( __parallel ) 3525 3680 ! 3526 3681 !-- Adjust the volume-flow rate through the top boundary … … 3635 3790 ENDDO 3636 3791 3792 #endif 3637 3793 END SUBROUTINE pmci_ensure_nest_mass_conservation 3638 3794
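The changeset comment summarizes the substance of r1764: the coupler setup gains cpl_parent_id, the former PMC_ACTIVE cpp switch is replaced by the standard __parallel preprocessor guard, errors are reported through the message subroutine instead of WRITE/MPI_ABORT, the index bounds in pmci_interp_tril_all are corrected, and the code is brought closer to PALM style (uppercase MPI calls, alphabetically sorted USE lists and interface blocks, lines marked for the 80-character limit). In addition, pmci_init now handles the pmc_no_namelist_found case and sets the new global switch nested_run.

As a hedged illustration of the guard-plus-message pattern used throughout the new revision, the following minimal sketch is not part of the file: the routine name and the nomatch flag are placeholders for this sketch, the error id PA0XYZ is the placeholder already used in the diff itself, and the message call mirrors the one in pmci_setup_server.

 SUBROUTINE example_nesting_error

#if defined( __parallel )
    USE control_parameters,                                                    &
        ONLY:  message_string

    IMPLICIT NONE

    INTEGER ::  nomatch   !: placeholder status flag for this sketch only

    nomatch = 1
    IF ( nomatch /= 0 )  THEN
!
!--    Abort via the standard PALM error handling instead of MPI_ABORT
       message_string = 'nested client domain does not fit into its ' //      &
                        'server domain'
       CALL message( 'example_nesting_error', 'PA0XYZ', 1, 2, 0, 6, 0 )
    ENDIF
#endif

 END SUBROUTINE example_nesting_error

The index bugfix in pmci_interp_tril_all can be read from the r1764 side of the diff: on nest boundaries the interpolation now also covers the boundary ghost cells, and the vertical loop includes the top ghost layer. Reformatted excerpt of the corrected bound setting (condensed here; the file uses full IF/ENDIF blocks):

    ib = nxl;  ie = nxr;  jb = nys;  je = nyn
    IF ( nest_bound_l )  THEN
       ib = nxl - 1
       IF ( var == 'u' )  ib = nxl   ! for u, nxl is a ghost node, but not for the other variables
    ENDIF
    IF ( nest_bound_s )  THEN
       jb = nys - 1
       IF ( var == 'v' )  jb = nys   ! for v, nys is a ghost node, but not for the other variables
    ENDIF
    IF ( nest_bound_r )  ie = nxr + 1
    IF ( nest_bound_n )  je = nyn + 1

The trilinear-interpolation triple loop then runs over i = ib, ie, j = jb, je and k = kb(j,i), nzt+1 (previously nxr, nyn and nzt).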