Changeset 206 for palm/trunk/SOURCE
- Timestamp: Oct 13, 2008 2:59:11 PM
- Location: palm/trunk/SOURCE
- Files: 1 added, 12 edited
Legend:
- Unmodified (shown without a prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
palm/trunk/SOURCE/CURRENT_MODIFICATIONS
r198 → r206

  New:
  ---
+ Restart runs on SGI-ICE are working (mrun).
+ 2d-decomposition is default on SGI-ICE systems. (init_pegrid)

+ Ocean-atmosphere coupling realized with MPI-1. mrun adjusted for this case
+ (-Y option). Adjustments in mrun, mbuild, and subjob for lcxt4.
+
+ check_for_restart, check_parameters, init_dvrp, init_pegrid, local_stop, modules, palm, surface_coupler, timestep
+ Makefile, mrun, mbuild, subjob
+
+ New: init_coupling
  ...
  Errors:
  ------
+ Bugfix: error in zu index in case of section_xy = -1 (header)
+
+ header
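The new routine init_coupling is added by this changeset, but its source is not part of the diff below. For orientation only, here is a minimal, self-contained sketch of the MPI-1 splitting pattern that the modified files rely on: one MPI_COMM_WORLD split into an atmosphere part and an ocean part, with the partner PE addressed by its global rank target_id. The even split of the PEs and the pairing rule are assumptions for illustration, not PALM's actual init_coupling; in PALM the processor distribution for coupled runs is apparently handled externally (mrun's new -Y option).

PROGRAM coupling_split_sketch
!-- Hypothetical sketch only: split MPI_COMM_WORLD into an "atmosphere" and an
!-- "ocean" part and derive the global rank of the partner PE (target_id).
!-- Assumes an even number of PEs; first half atmosphere, second half ocean.
   USE mpi
   IMPLICIT NONE
   INTEGER ::  color, comm_inter, comm_palm, ierr, myid, numprocs, target_id

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )

   IF ( myid < numprocs/2 )  THEN
      color = 0                                   ! atmosphere
   ELSE
      color = 1                                   ! ocean
   ENDIF
   CALL MPI_COMM_SPLIT( MPI_COMM_WORLD, color, myid, comm_palm, ierr )

!-- With MPI-1 there is no dynamic process creation, so the global communicator
!-- serves as the communicator between the models and the partner PE is
!-- addressed by its global rank
   comm_inter = MPI_COMM_WORLD
   IF ( color == 0 )  THEN
      target_id = myid + numprocs/2
   ELSE
      target_id = myid - numprocs/2
   ENDIF

   PRINT*, 'PE', myid, ' global target PE:', target_id
   CALL MPI_FINALIZE( ierr )
END PROGRAM coupling_split_sketch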
palm/trunk/SOURCE/Makefile
r151 → r206

  # Actual revisions:
  # -----------------
- # +plant_canopy_model, inflow_turbulence
- #
- # +surface_coupler
+ # +init_coupling
  #
  # Former revisions:
  # -----------------
  # $Id$
+ #
+ # 151 2008-03-07 13:42:18Z raasch
+ # +plant_canopy_model, inflow_turbulence
+ # +surface_coupler
  #
  # 96 2007-06-04 08:07:41Z raasch
  ...
-       init_3d_model.f90 init_advec.f90 init_cloud_physics.f90 init_dvrp.f90 \
-       init_grid.f90 init_ocean.f90 init_particles.f90 init_pegrid.f90 \
+       init_3d_model.f90 init_advec.f90 init_cloud_physics.f90 init_coupling.f90 \
+       init_dvrp.f90 init_grid.f90 init_ocean.f90 init_particles.f90 init_pegrid.f90 \
        init_pt_anomaly.f90 init_rankine.f90 init_slope.f90 \
  ...
-       init_dvrp.o init_grid.o init_ocean.o init_particles.o init_pegrid.o \
+       init_coupling.o init_dvrp.o init_grid.o init_ocean.o init_particles.o init_pegrid.o \
        init_pt_anomaly.o init_rankine.o init_slope.o \
  ...
  init_cloud_physics.o: modules.o
+ init_coupling.o: modules.o
  init_dvrp.o: modules.o
palm/trunk/SOURCE/check_for_restart.f90
r110 → r206

  ! Actual revisions:
  ! -----------------
- !
+ ! Implementation of an MPI-1 coupling: replaced myid with target_id
  ...
  !-- Output that job will be terminated
  IF ( terminate_run  .AND.  myid == 0 )  THEN
-    PRINT*, '*** WARNING: run will be terminated because it is running out', &
-            ' of job cpu limit'
+    PRINT*, '*** WARNING: run will be terminated because it is running', &
+            ' out of job cpu limit'
     PRINT*, '             remaining time:         ', remaining_time, ' s'
-    PRINT*, '             termination time needed:', termination_time_needed, &
-            ' s'
+    PRINT*, '             termination time needed:', &
+            termination_time_needed, ' s'
  ENDIF
  ...
     terminate_coupled = 3
-    CALL MPI_SENDRECV( terminate_coupled, 1, MPI_INTEGER, myid, 0,        &
-                       terminate_coupled_remote, 1, MPI_INTEGER, myid, 0, &
-                       comm_inter, status, ierr )
+    CALL MPI_SENDRECV( terminate_coupled, 1, MPI_INTEGER,        &
+                       target_id, 0,                             &
+                       terminate_coupled_remote, 1, MPI_INTEGER, &
+                       target_id, 0,                             &
+                       comm_inter, status, ierr )
  ENDIF
  ...
  The MPI_SENDRECV call in the coupled-restart branch (terminate_coupled = 4/5)
  is changed in the same way; the PRINT of the new restart time and the IF
  condition on terminate_coupled / terminate_coupled_remote are only re-wrapped.
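The call above (and the analogous calls in local_stop and timestep below) is a symmetric handshake: both partners post one MPI_SENDRECV with identical tags, so each side learns the other's termination flag in a single call. A stand-alone sketch of this handshake, assuming a toy pairing of the PEs and plain MPI_COMM_WORLD in place of PALM's comm_inter:

PROGRAM termination_handshake_sketch
!-- Hypothetical sketch only: symmetric exchange of a termination flag between
!-- partner PEs.  With the MPI-1 coupling the partner lives in the same
!-- MPI_COMM_WORLD, so it must be addressed by its global rank (target_id);
!-- the old MPI-2 code could use myid because point-to-point ranks on a true
!-- intercommunicator refer to the remote group.
   USE mpi
   IMPLICIT NONE
   INTEGER ::  comm_inter, ierr, myid, numprocs, target_id
   INTEGER ::  terminate_coupled = 0, terminate_coupled_remote = 0
   INTEGER, DIMENSION(MPI_STATUS_SIZE) ::  status

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )

   comm_inter = MPI_COMM_WORLD
   target_id  = MOD( myid + numprocs/2, numprocs )   ! toy pairing, even numprocs

   IF ( myid == 0 )  terminate_coupled = 3           ! e.g. cpu limit reached

   CALL MPI_SENDRECV( terminate_coupled,        1, MPI_INTEGER, target_id, 0, &
                      terminate_coupled_remote, 1, MPI_INTEGER, target_id, 0, &
                      comm_inter, status, ierr )

   IF ( terminate_coupled_remote > 0 )  THEN
      PRINT*, 'PE', myid, ': partner PE', target_id, ' will terminate'
   ENDIF

   CALL MPI_FINALIZE( ierr )
END PROGRAM termination_handshake_sketch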
palm/trunk/SOURCE/check_parameters.f90
r198 → r206

  ! Actual revisions:
  ! -----------------
- !
+ ! Implementation of an MPI-1 coupling: replaced myid with target_id,
+ ! deleted __mpi2 directives
  ...
     CALL local_stop
  ENDIF
- #if defined( __parallel )  &&  defined( __mpi2 )
- CALL MPI_SEND( dt_coupling, 1, MPI_REAL, myid, 11, comm_inter, ierr )
- CALL MPI_RECV( remote, 1, MPI_REAL, myid, 11, comm_inter, status, ierr )
+ #if defined( __parallel )
+ CALL MPI_SEND( dt_coupling, 1, MPI_REAL, target_id, 11, comm_inter, &
+                ierr )
+ CALL MPI_RECV( remote, 1, MPI_REAL, target_id, 11, comm_inter,      &
+                status, ierr )
  IF ( dt_coupling /= remote )  THEN
  ...
  The same replacement of myid by target_id (together with re-wrapping of the
  continuation lines) is applied to the exchanges of dt_max (tag 19),
  restart_time (tag 12), dt_restart (tag 13), end_time (tag 14), dx (tag 15),
  dy (tag 16), nx (tag 17) and ny (tag 18).
  ...
- #if defined( __parallel )  &&  defined( __mpi2 )
+ #if defined( __parallel )
  !
  !-- Exchange via intercommunicator
  IF ( coupling_mode == 'atmosphere_to_ocean' )  THEN
-    CALL MPI_SEND( humidity, &
-                   1, MPI_LOGICAL, myid, 19, comm_inter, ierr )
+    CALL MPI_SEND( humidity, 1, MPI_LOGICAL, target_id, 19, comm_inter, &
+                   ierr )
  ELSEIF ( coupling_mode == 'ocean_to_atmosphere' )  THEN
-    CALL MPI_RECV( humidity_remote, &
-                   1, MPI_LOGICAL, myid, 19, comm_inter, status, ierr )
+    CALL MPI_RECV( humidity_remote, 1, MPI_LOGICAL, target_id, 19, &
+                   comm_inter, status, ierr )
  ENDIF
  #endif
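check_parameters uses these exchanges to verify that both models were started with consistent control parameters. A compressed sketch of the idea, using MPI_SENDRECV for brevity instead of the matched MPI_SEND/MPI_RECV pairs (tags 11 to 19) of the actual code, and with an illustrative pairing rule for target_id:

PROGRAM parameter_check_sketch
!-- Hypothetical sketch only: exchange a control parameter with the partner PE
!-- and abort if the two models disagree.  Pairing rule and parameter value
!-- are invented for illustration (assumes an even number of PEs).
   USE mpi
   IMPLICIT NONE
   INTEGER ::  ierr, myid, numprocs, target_id
   INTEGER, DIMENSION(MPI_STATUS_SIZE) ::  status
   REAL    ::  dt_coupling, remote

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )

   target_id   = MOD( myid + numprocs/2, numprocs )   ! illustrative pairing
   dt_coupling = 10.0                                 ! would come from the namelist

   CALL MPI_SENDRECV( dt_coupling, 1, MPI_REAL, target_id, 11, &
                      remote,      1, MPI_REAL, target_id, 11, &
                      MPI_COMM_WORLD, status, ierr )

   IF ( dt_coupling /= remote )  THEN
      IF ( myid == 0 )  PRINT*, '+++ dt_coupling ', dt_coupling, &
                                ' is not equal to remote value ', remote
      CALL MPI_ABORT( MPI_COMM_WORLD, 1, ierr )
   ENDIF

   CALL MPI_FINALIZE( ierr )
END PROGRAM parameter_check_sketch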
palm/trunk/SOURCE/header.f90
r200 → r206

  ! Actual revisions:
  ! -----------------
- !
+ ! Bugfix: error in zu index in case of section_xy = -1
  ...
     slices = TRIM( slices ) // TRIM( section_chr ) // '/'

-    WRITE (coor_chr,'(F10.1)')  zu(section(i,1))
+    IF ( section(i,1) == -1 )  THEN
+       WRITE (coor_chr,'(F10.1)')  -1.0
+    ELSE
+       WRITE (coor_chr,'(F10.1)')  zu(section(i,1))
+    ENDIF
     coor_chr = ADJUSTL( coor_chr )
     coordinates = TRIM( coordinates ) // TRIM( coor_chr ) // '/'
palm/trunk/SOURCE/init_dvrp.f90
r198 → r206

  ! TEST: print* statements
  ! ToDo: checking of mode_dvrp for legal values is not correct
- !
+ ! Implementation of a MPI-1 coupling: __mpi2 adjustments for MPI_COMM_WORLD
  ! Former revisions:
  ! -----------------
  ...
     USE pegrid
     USE control_parameters
+
+ !
+ !-- New coupling
+    USE coupling

     IMPLICIT NONE
  ...
     WRITE ( 9, * )  '*** myid=', myid, ' vor DVRP_SPLIT'
     CALL local_flush( 9 )
+
+ !
+ !-- Adjustment for new MPI-1 coupling. This might be unnecessary.
+ #if defined( __mpi2 )
     CALL DVRP_SPLIT( MPI_COMM_WORLD, comm_palm )
+ #else
+    IF ( coupling_mode /= 'uncoupled' )  THEN
+       CALL DVRP_SPLIT( comm_inter, comm_palm )
+    ELSE
+       CALL DVRP_SPLIT( MPI_COMM_WORLD, comm_palm )
+    ENDIF
+ #endif
+
     WRITE ( 9, * )  '*** myid=', myid, ' nach DVRP_SPLIT'
     CALL local_flush( 9 )
palm/trunk/SOURCE/init_pegrid.f90
r198 → r206

  ! Actual revisions:
  ! -----------------
+ ! Implementation of a MPI-1 coupling: added __parallel within the __mpi2 part
+ ! 2d-decomposition is default on SGI-ICE systems
  ! ATTENTION: nnz_x undefined problem still has to be solved!!!!!!!!
  ! TEST OUTPUT (TO BE REMOVED) logging mpi2 ierr values
  ...
  !-- Automatic determination of the topology
  !-- The default on SMP- and cluster-hosts is a 1d-decomposition along x
- IF ( host(1:3) == 'ibm'  .OR.  host(1:3) == 'nec'  .OR. &
-      host(1:2) == 'lc'   .OR.  host(1:3) == 'dec' )  THEN
+ IF ( host(1:3) == 'ibm'  .OR.  host(1:3) == 'nec'        .OR. &
+      ( host(1:2) == 'lc'  .AND.  host(3:5) /= 'sgi' )    .OR. &
+      host(1:3) == 'dec' )  THEN

     pdims(1) = numprocs
  ...
  #endif

+ #if defined( __parallel )
  #if defined( __mpi2 )
  !
  ...
  ENDIF
+ #endif

  !
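On hosts where the 1d default is not taken (now including SGI-ICE), init_pegrid sets up a 2d processor grid. As a rough illustration of what a 2d virtual topology amounts to (not the actual init_pegrid logic, which also honors user-given npex/npey), MPI can pick and build the decomposition itself:

PROGRAM pegrid_sketch
!-- Hypothetical sketch only: let MPI choose a 2d processor decomposition and
!-- build the corresponding Cartesian communicator.
   USE mpi
   IMPLICIT NONE
   INTEGER               ::  comm2d, ierr, my_cart_id, numprocs
   INTEGER, DIMENSION(2) ::  pdims   = (/ 0, 0 /)          ! 0 = let MPI decide
   LOGICAL, DIMENSION(2) ::  periods = (/ .TRUE., .TRUE. /)

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )

!-- E.g. numprocs = 8 typically yields pdims = (/ 4, 2 /)
   CALL MPI_DIMS_CREATE( numprocs, 2, pdims, ierr )
   CALL MPI_CART_CREATE( MPI_COMM_WORLD, 2, pdims, periods, .FALSE., &
                         comm2d, ierr )
   CALL MPI_COMM_RANK( comm2d, my_cart_id, ierr )

   IF ( my_cart_id == 0 )  THEN
      PRINT*, '2d virtual processor grid: ', pdims(1), ' x ', pdims(2)
   ENDIF

   CALL MPI_FINALIZE( ierr )
END PROGRAM pegrid_sketch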
palm/trunk/SOURCE/local_stop.f90
r198 → r206

  ! Actual revisions:
  ! -----------------
- !
- !
+ ! Implementation of a MPI-1 coupling: replaced myid with target_id
  ...
        terminate_coupled = 1
        CALL MPI_SENDRECV( &
-                  terminate_coupled, 1, MPI_INTEGER, myid, 0,             &
-                  terminate_coupled_remote, 1, MPI_INTEGER, myid, 0,      &
+                  terminate_coupled, 1, MPI_INTEGER, target_id, 0,        &
+                  terminate_coupled_remote, 1, MPI_INTEGER, target_id, 0, &
                   comm_inter, status, ierr )
     ENDIF
palm/trunk/SOURCE/modules.f90
r198 → r206

  ! Actual revisions:
  ! -----------------
- !
+ ! +target_id
  ...
  CHARACTER(LEN=5) ::  myid_char = ''
- INTEGER ::  id_inflow = 0, id_recycling = 0, myid=0, npex = -1,    &
-             npey = -1, numprocs = 1, numprocs_previous_run = -1,   &
+ INTEGER ::  id_inflow = 0, id_recycling = 0, myid = 0,             &
+             target_id, npex = -1, npey = -1, numprocs = 1,         &
+             numprocs_previous_run = -1,                            &
              tasks_per_node = -9999, threads_per_task = 1
palm/trunk/SOURCE/palm.f90
r198 → r206

  ! Actual revisions:
  ! -----------------
- !
+ ! Initialization of coupled runs modified for MPI-1 and moved to external
+ ! subroutine init_coupling
  ...
     CALL MPI_INIT( ierr )
     CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )
+    CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
     comm_palm = MPI_COMM_WORLD
     comm2d    = MPI_COMM_WORLD
- #endif
-
- #if defined( __mpi2 )
- !
- !-- Get information about the coupling mode from the environment variable
- !-- which has been set by the mpiexec command.
- !-- This method is currently not used because the mpiexec command is not
- !-- available on some machines
- !  CALL local_getenv( 'coupling_mode', 13, coupling_mode, i )
- !  IF ( i == 0 )  coupling_mode = 'uncoupled'
- !  IF ( coupling_mode == 'ocean_to_atmosphere' )  coupling_char = '_O'
-
- !
- !-- Get information about the coupling mode from standard input (PE0 only) and
- !-- distribute it to the other PEs
- CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
- IF ( myid == 0 )  THEN
-    READ (*,*,ERR=10,END=10)  coupling_mode
- 10 IF ( TRIM( coupling_mode ) == 'atmosphere_to_ocean' )  THEN
-       i = 1
-    ELSEIF ( TRIM( coupling_mode ) == 'ocean_to_atmosphere' )  THEN
-       i = 2
-    ELSE
-       i = 0
-    ENDIF
- ENDIF
- CALL MPI_BCAST( i, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr )
- IF ( i == 0 )  THEN
-    coupling_mode = 'uncoupled'
- ELSEIF ( i == 1 )  THEN
-    coupling_mode = 'atmosphere_to_ocean'
- ELSEIF ( i == 2 )  THEN
-    coupling_mode = 'ocean_to_atmosphere'
- ENDIF
- IF ( coupling_mode == 'ocean_to_atmosphere' )  coupling_char = '_O'
+
+ !
+ !-- Initialize PE topology in case of coupled runs
+ CALL init_coupling
  #endif
  ...
  CALL cpu_log( log_point(1), 'total', 'start' )
  CALL cpu_log( log_point(2), 'initialisation', 'start' )
+
+ !
+ !-- Open a file for debug output
+ WRITE (myid_char,'(''_'',I4.4)')  myid
+ OPEN( 9, FILE='DEBUG'//TRIM( coupling_char )//myid_char, FORM='FORMATTED' )
  ...
  #if defined( __parallel )
     CALL MPI_COMM_RANK( comm_palm, myid, ierr )
- #endif
-
- !
- !-- Open a file for debug output
- WRITE (myid_char,'(''_'',I4.4)')  myid
- OPEN( 9, FILE='DEBUG'//TRIM( coupling_char )//myid_char, FORM='FORMATTED' )
-
- #if defined( __mpi2 )
  !
  !-- TEST OUTPUT (TO BE REMOVED)
  WRITE(9,*) '*** coupling_mode = "', TRIM( coupling_mode ), '"'
  CALL LOCAL_FLUSH( 9 )
- print*, '*** PE', myid, ' ', TRIM( coupling_mode )
+ PRINT*, '*** PE', myid, ' Global target PE:', target_id, &
+         TRIM( coupling_mode )
  #endif
  ...
  #if defined( __mpi2 )
  !
- !-- Test exchange via intercommunicator
+ !-- Test exchange via intercommunicator in case of a MPI-2 coupling
  IF ( coupling_mode == 'atmosphere_to_ocean' )  THEN
     i = 12345 + myid
palm/trunk/SOURCE/surface_coupler.f90
r110 → r206

  ! Actual revisions:
  ! -----------------
- !
+ ! Implementation of a MPI-1 Coupling: replaced myid with target_id,
+ ! deleted __mpi2 directives
  ...
  REAL ::  simulated_time_remote

- #if defined( __parallel )  &&  defined( __mpi2 )
+ #if defined( __parallel )

  CALL cpu_log( log_point(39), 'surface_coupler', 'start' )
  ...
-    CALL MPI_SENDRECV( terminate_coupled, 1, MPI_INTEGER, myid, 0,        &
-                       terminate_coupled_remote, 1, MPI_INTEGER, myid, 0, &
-                       comm_inter, status, ierr )
+    CALL MPI_SENDRECV( terminate_coupled, 1, MPI_INTEGER, target_id,        &
+                       0,                                                   &
+                       terminate_coupled_remote, 1, MPI_INTEGER, target_id, &
+                       0, comm_inter, status, ierr )
     IF ( terminate_coupled_remote > 0 )  THEN
  ...
  !-- Exchange the current simulated time between the models,
  !-- currently just for testing
- CALL MPI_SEND( simulated_time, 1, MPI_REAL, myid, 11, comm_inter, ierr )
- CALL MPI_RECV( simulated_time_remote, 1, MPI_REAL, myid, 11,      &
+ CALL MPI_SEND( simulated_time, 1, MPI_REAL, target_id, 11,        &
+                comm_inter, ierr )
+ CALL MPI_RECV( simulated_time_remote, 1, MPI_REAL, target_id, 11, &
                 comm_inter, status, ierr )
  WRITE ( 9, * )  simulated_time, ' remote: ', simulated_time_remote
  ...
  All remaining exchanges are changed in the same way, myid being replaced by
  target_id in every MPI_SEND/MPI_RECV call: the atmosphere sends shf (tag 12),
  qsws (tag 13), usws (tag 15) and vsws (tag 16) and receives pt (tag 14); the
  ocean receives tswst (tag 12), qswst_remote (tag 13), uswst (tag 15) and
  vswst (tag 16) and sends pt (tag 14).  The "WRITE ( 9, * ) '    ready'" /
  "CALL local_flush( 9 )" debug pairs that followed each of these calls are
  removed.
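The coupler is essentially a set of matched point-to-point transfers of 2d surface arrays between partner PEs, with one fixed tag per field. A toy sketch of one such transfer (array size, field values and the pairing rule are invented for illustration; PALM exchanges the real surface-flux arrays on its own subdomain bounds):

PROGRAM flux_exchange_sketch
!-- Hypothetical sketch only: one matched surface-flux transfer between an
!-- "atmosphere" PE and its "ocean" partner PE, using a fixed message tag.
!-- Assumes an even number of PEs; first half atmosphere, second half ocean.
   USE mpi
   IMPLICIT NONE
   INTEGER, PARAMETER ::  nx = 4, ny = 4
   INTEGER ::  comm_inter, ierr, myid, numprocs, target_id
   INTEGER, DIMENSION(MPI_STATUS_SIZE) ::  status
   REAL, DIMENSION(ny,nx) ::  shf, tswst

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )

   comm_inter = MPI_COMM_WORLD
   target_id  = MOD( myid + numprocs/2, numprocs )      ! partner PE

   IF ( myid < numprocs/2 )  THEN
!--   "Atmosphere" half: send the sensible heat flux with tag 12
      shf = 0.1
      CALL MPI_SEND( shf, ny*nx, MPI_REAL, target_id, 12, comm_inter, ierr )
   ELSE
!--   "Ocean" half: receive it as the top boundary heat flux with the same tag
      CALL MPI_RECV( tswst, ny*nx, MPI_REAL, target_id, 12, comm_inter, &
                     status, ierr )
      PRINT*, 'PE', myid, ' received tswst(1,1) =', tswst(1,1)
   ENDIF

   CALL MPI_FINALIZE( ierr )
END PROGRAM flux_exchange_sketch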
palm/trunk/SOURCE/timestep.f90
r110 → r206

  ! Actual revisions:
  ! -----------------
- !
+ ! Implementation of a MPI-1 Coupling: replaced myid with target_id
  ...
        terminate_coupled = 2
        CALL MPI_SENDRECV( &
-                  terminate_coupled, 1, MPI_INTEGER, myid, 0,             &
-                  terminate_coupled_remote, 1, MPI_INTEGER, myid, 0,      &
+                  terminate_coupled, 1, MPI_INTEGER, target_id, 0,        &
+                  terminate_coupled_remote, 1, MPI_INTEGER, target_id, 0, &
                   comm_inter, status, ierr )
     ENDIF