! COMPUTATIONAL_WORK_GROUP_TYPE: contains information on logical working groups
INTEGER(INTG) :: number_computational_nodes
INTEGER(INTG) :: number_sub_work_groups
LOGICAL :: comp_env_finished

! Cache hierarchy type: contains information on a cache hierarchy
INTEGER(INTG) :: number_levels
INTEGER(INTG), ALLOCATABLE :: size(:)

! COMPUTATIONAL_NODE_TYPE: contains information on a computational node
INTEGER(INTG) :: number_processors
INTEGER(INTG) :: node_name_length
CHARACTER(LEN=MPI_MAX_PROCESSOR_NAME) :: node_name

! MPI_COMPUTATIONAL_NODE_TYPE: MPI type information used to transfer COMPUTATIONAL_NODE_TYPE data
INTEGER(INTG) :: mpi_type
INTEGER(INTG) :: num_blocks
INTEGER(INTG) :: block_lengths(4)
INTEGER(MPI_ADDRESS_KIND) :: displacements(4)

! COMPUTATIONAL_ENVIRONMENT_TYPE: contains information on the computational environment
INTEGER(INTG) :: mpi_comm
INTEGER(INTG) :: number_computational_nodes
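Assembled, these components suggest the shape of the module's derived types. A minimal sketch of the work group type, assuming the component names used by the routines below (SUB_WORK_GROUPS, PARENT and COMP_ENV appear in the code; the name of the pointer-wrapper type is an assumption):

TYPE COMPUTATIONAL_WORK_GROUP_PTR_TYPE !assumed name for the pointer type to COMPUTATIONAL_WORK_GROUP_TYPE
  TYPE(COMPUTATIONAL_WORK_GROUP_TYPE), POINTER :: PTR
END TYPE COMPUTATIONAL_WORK_GROUP_PTR_TYPE

TYPE COMPUTATIONAL_WORK_GROUP_TYPE
  INTEGER(INTG) :: NUMBER_COMPUTATIONAL_NODES !node count for this group; grows as children are added
  INTEGER(INTG) :: NUMBER_SUB_WORK_GROUPS
  TYPE(COMPUTATIONAL_WORK_GROUP_PTR_TYPE), ALLOCATABLE :: SUB_WORK_GROUPS(:)
  TYPE(COMPUTATIONAL_WORK_GROUP_TYPE), POINTER :: PARENT
  LOGICAL :: COMP_ENV_FINISHED
  TYPE(COMPUTATIONAL_ENVIRONMENT_TYPE), POINTER :: COMP_ENV
END TYPE COMPUTATIONAL_WORK_GROUP_TYPE

The mutual reference between the two types (a pointer component of a type defined later in the same scope) relies on Fortran 2008's relaxed forward-reference rules for pointer components.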
SUBROUTINE COMPUTATIONAL_WORK_GROUP_SUBGROUP_ADD(parent_work_group,number_computational_nodes, &
  & added_work_group,err,error,*)

  INTEGER(INTG), INTENT(IN) :: NUMBER_COMPUTATIONAL_NODES
  INTEGER(INTG), INTENT(OUT) :: ERR

  ALLOCATE(new_work_group%PTR)
  new_work_group%PTR%NUMBER_COMPUTATIONAL_NODES = number_computational_nodes
  new_work_group%PTR%NUMBER_SUB_WORK_GROUPS = 0

  IF(ASSOCIATED(parent_work_group)) THEN
    !Grow the parent's sub-group list by one: copy out, reallocate, copy back.
    ALLOCATE(sub_work_groups(parent_work_group%NUMBER_SUB_WORK_GROUPS+1))
    DO i=1,parent_work_group%NUMBER_SUB_WORK_GROUPS
      sub_work_groups(i)%PTR=>parent_work_group%SUB_WORK_GROUPS(i)%PTR
    ENDDO !i
    IF(ALLOCATED(parent_work_group%SUB_WORK_GROUPS)) THEN
      DEALLOCATE(parent_work_group%SUB_WORK_GROUPS)
    ENDIF
    sub_work_groups(1+parent_work_group%NUMBER_SUB_WORK_GROUPS)%PTR=>new_work_group%PTR
    ALLOCATE(parent_work_group%SUB_WORK_GROUPS(SIZE(sub_work_groups,1)))
    DO i=1,SIZE(sub_work_groups,1)
      parent_work_group%SUB_WORK_GROUPS(i)%PTR => sub_work_groups(i)%PTR
    ENDDO !i
    DEALLOCATE(sub_work_groups)
    parent_work_group%NUMBER_SUB_WORK_GROUPS = 1+parent_work_group%NUMBER_SUB_WORK_GROUPS
    new_work_group%PTR%PARENT => parent_work_group
    !Propagate the new group's node count up through every ancestor.
    tmp_parent_work_group => parent_work_group
    DO WHILE(ASSOCIATED(tmp_parent_work_group))
      tmp_parent_work_group%NUMBER_COMPUTATIONAL_NODES = tmp_parent_work_group%NUMBER_COMPUTATIONAL_NODES &
        & + new_work_group%PTR%NUMBER_COMPUTATIONAL_NODES
      tmp_parent_work_group => tmp_parent_work_group%PARENT
    ENDDO
  ELSE
    CALL flagerror('PARENT_WORK_GROUP is not associated, call COMPUTATIONAL_WORK_GROUP_CREATE_START first', &
      & err,error,*999)
  ENDIF
  added_work_group => new_work_group%PTR

  exits("COMPUTATIONAL_WORK_GROUP_SUBGROUP_ADD")
  RETURN
999 errorsexits("COMPUTATIONAL_WORK_GROUP_SUBGROUP_ADD",err,error)
  RETURN 1
END SUBROUTINE COMPUTATIONAL_WORK_GROUP_SUBGROUP_ADD
SUBROUTINE COMPUTATIONAL_WORK_GROUP_CREATE_START(world_work_group,number_computational_nodes,err,error,*)

  INTEGER(INTG), INTENT(IN) :: NUMBER_COMPUTATIONAL_NODES
  INTEGER(INTG), INTENT(OUT) :: ERR

  IF(ASSOCIATED(world_work_group)) THEN
    CALL flagerror('WORLD_WORK_GROUP is already associated', err, error, *999)
  ELSE
    ALLOCATE(new_work_group%PTR)
    new_work_group%PTR%NUMBER_COMPUTATIONAL_NODES = number_computational_nodes
    new_work_group%PTR%NUMBER_SUB_WORK_GROUPS = 0
    !The world group is the root of the tree: no parent, no environment yet.
    NULLIFY(new_work_group%PTR%PARENT)
    NULLIFY(new_work_group%PTR%COMP_ENV)
    world_work_group=>new_work_group%PTR
  ENDIF

  exits("COMPUTATIONAL_WORK_GROUP_CREATE_START")
  RETURN
999 errorsexits("COMPUTATIONAL_WORK_GROUP_CREATE_START",err,error)
  RETURN 1
END SUBROUTINE COMPUTATIONAL_WORK_GROUP_CREATE_START
RECURSIVE SUBROUTINE Computational_WorkGroupGenerateCompEnviron(work_group,available_rank_list,err,error,*)

  INTEGER(INTG), ALLOCATABLE, INTENT(INOUT) :: AVAILABLE_RANK_LIST(:)
  INTEGER(INTG), INTENT(OUT) :: ERR
  INTEGER(INTG) :: I,MPI_IERROR,RANK,ORIGINAL_GROUP,NEW_GROUP
  INTEGER(INTG), ALLOCATABLE :: NEW_AVAILABLE_RANK_LIST(:)

  enters("Computational_WorkGroupGenerateCompEnviron",err,error,*999)

  ALLOCATE(work_group%COMP_ENV)
  work_group%COMP_ENV%NUMBER_COMPUTATIONAL_NODES = work_group%NUMBER_COMPUTATIONAL_NODES
  work_group%COMP_ENV%MY_COMPUTATIONAL_NODE_NUMBER=rank
  ALLOCATE(work_group%COMP_ENV%COMPUTATIONAL_NODES(work_group%COMP_ENV%NUMBER_COMPUTATIONAL_NODES))
  i=SIZE(available_rank_list,1)
  IF(SIZE(available_rank_list,1)-work_group%COMP_ENV%NUMBER_COMPUTATIONAL_NODES < 0) THEN
    CALL flagerror("NOT ENOUGH RANKS", err, error, *999)
  ENDIF
  !Claim the first NUMBER_COMPUTATIONAL_NODES entries of the rank list for this
  !group; after the loop i = NUMBER_COMPUTATIONAL_NODES+1, so ranks 1..i-1 are ours.
  DO i=1,work_group%COMP_ENV%NUMBER_COMPUTATIONAL_NODES, 1
    ! ... (per-rank node bookkeeping; elided in this listing)
  ENDDO !i

  !Create a new MPI communicator containing exactly the claimed ranks.
  CALL mpi_comm_group(mpi_comm_world,original_group,mpi_ierror)
  CALL mpi_group_incl(original_group,i-1,available_rank_list(1:i-1),new_group,mpi_ierror)
  CALL mpi_comm_create(mpi_comm_world,new_group,work_group%COMP_ENV%MPI_COMM,mpi_ierror)
  CALL mpi_group_free(original_group,mpi_ierror)
  CALL mpi_group_free(new_group,mpi_ierror)

  !Shrink the available rank list to the unclaimed remainder.
  ALLOCATE(new_available_rank_list(SIZE(available_rank_list,1)-work_group%COMP_ENV%NUMBER_COMPUTATIONAL_NODES))
  new_available_rank_list(1:SIZE(new_available_rank_list)) = available_rank_list(i:SIZE(available_rank_list,1))
  DEALLOCATE(available_rank_list)
  ALLOCATE(available_rank_list(SIZE(new_available_rank_list,1)))
  available_rank_list(:) = new_available_rank_list(:)
  work_group%COMP_ENV_FINISHED = .TRUE.

  !Recurse into the sub work groups with the remaining ranks.
  DO i=1,work_group%NUMBER_SUB_WORK_GROUPS,1
    CALL Computational_WorkGroupGenerateCompEnviron(work_group%SUB_WORK_GROUPS(i)%PTR, &
      & available_rank_list,err,error,*999)
  ENDDO !i

  exits("Computational_WorkGroupGenerateCompEnviron")
  RETURN
999 errorsexits("Computational_WorkGroupGenerateCompEnviron",err,error)
  RETURN 1
END SUBROUTINE Computational_WorkGroupGenerateCompEnviron
SUBROUTINE COMPUTATIONAL_WORK_GROUP_CREATE_FINISH(world_work_group,err,error,*)

  INTEGER(INTG), INTENT(OUT) :: ERR
  INTEGER(INTG), ALLOCATABLE :: AVAILABLE_RANK_LIST(:)

  enters("COMPUTATIONAL_WORK_GROUP_CREATE_FINISH",err,error,*999)

  ! ... (elided: attach the world group's COMP_ENV)
  world_work_group%COMP_ENV_FINISHED = .TRUE.

  !Seed the rank list with every rank in the world environment: 0,1,...,N-1.
  ALLOCATE(available_rank_list(world_work_group%COMP_ENV%NUMBER_COMPUTATIONAL_NODES))
  DO i=0,SIZE(available_rank_list,1)-1
    available_rank_list(i+1) = i
  ENDDO !i
  !Generate the computational environment for each sub work group recursively.
  DO i=1,world_work_group%NUMBER_SUB_WORK_GROUPS,1
    CALL Computational_WorkGroupGenerateCompEnviron(world_work_group%SUB_WORK_GROUPS(i)%PTR, &
      & available_rank_list,err,error,*999)
  ENDDO !i

  exits("COMPUTATIONAL_WORK_GROUP_CREATE_FINISH")
  RETURN
999 errorsexits("COMPUTATIONAL_WORK_GROUP_CREATE_FINISH",err,error)
  RETURN 1
END SUBROUTINE COMPUTATIONAL_WORK_GROUP_CREATE_FINISH
SUBROUTINE COMPUTATIONAL_NODE_FINALISE(computational_node,err,error,*)

  INTEGER(INTG), INTENT(OUT) :: ERR

  enters("COMPUTATIONAL_NODE_FINALISE",err,error,*999)

  computational_node%NUMBER_PROCESSORS=0
  computational_node%RANK=-1
  computational_node%NODE_NAME_LENGTH=0
  computational_node%NODE_NAME=""

  exits("COMPUTATIONAL_NODE_FINALISE")
  RETURN
999 errorsexits("COMPUTATIONAL_NODE_FINALISE",err,error)
  RETURN 1
END SUBROUTINE COMPUTATIONAL_NODE_FINALISE
SUBROUTINE COMPUTATIONAL_NODE_INITIALISE(computational_node,rank,err,error,*)

  INTEGER(INTG), INTENT(IN) :: RANK
  INTEGER(INTG), INTENT(OUT) :: ERR
  INTEGER(INTG) :: MPI_IERROR

  enters("COMPUTATIONAL_NODE_INITIALISE",err,error,*999)

  computational_node%NUMBER_PROCESSORS=1
  computational_node%RANK=rank
  CALL mpi_get_processor_name(computational_node%NODE_NAME,computational_node%NODE_NAME_LENGTH,mpi_ierror)
  CALL mpi_error_check("MPI_GET_PROCESSOR_NAME",mpi_ierror,err,error,*999)

  exits("COMPUTATIONAL_NODE_INITIALISE")
  RETURN
999 errorsexits("COMPUTATIONAL_NODE_INITIALISE",err,error)
  RETURN 1
END SUBROUTINE COMPUTATIONAL_NODE_INITIALISE
SUBROUTINE COMPUTATIONAL_NODE_MPI_TYPE_FINALISE(err,error,*)
  INTEGER(INTG), INTENT(OUT) :: ERR
  INTEGER(INTG) :: i,MPI_IERROR

  enters("COMPUTATIONAL_NODE_MPI_TYPE_FINALISE",err,error,*999)
  ! ... (body elided in this listing)
  exits("COMPUTATIONAL_NODE_MPI_TYPE_FINALISE")
  RETURN
999 errorsexits("COMPUTATIONAL_NODE_MPI_TYPE_FINALISE",err,error)
  RETURN 1
END SUBROUTINE COMPUTATIONAL_NODE_MPI_TYPE_FINALISE
SUBROUTINE COMPUTATIONAL_NODE_MPI_TYPE_INITIALISE(computational_node,err,error,*)
  INTEGER(INTG), INTENT(OUT) :: ERR
  INTEGER(INTG) :: I,MPI_IERROR

  enters("COMPUTATIONAL_NODE_MPI_TYPE_INITIALISE",err,error,*999)

  ! ... (elided: fill num_blocks, block_lengths, displacements and the block
  !      types, then create and commit the MPI struct type)
  CALL mpi_error_check("MPI_TYPE_CREATE_STRUCT",mpi_ierror,err,error,*999)
  ! ...

  exits("COMPUTATIONAL_NODE_MPI_TYPE_INITIALISE")
  RETURN
  ! ...
998 errorsexits("COMPUTATIONAL_NODE_MPI_TYPE_INITIALISE",err,error)
  RETURN 1
END SUBROUTINE COMPUTATIONAL_NODE_MPI_TYPE_INITIALISE
SUBROUTINE COMPUTATIONAL_ENVIRONMENT_FINALISE(err,error,*)
  INTEGER(INTG), INTENT(OUT) :: ERR
  INTEGER(INTG) :: COMPUTATIONAL_NODE,MPI_IERROR

  enters("COMPUTATIONAL_ENVIRONMENT_FINALISE",err,error,*999)

  ! ... (elided: finalise the computational node data and free the MPI communicator)
  CALL mpi_finalize(mpi_ierror)

  exits("COMPUTATIONAL_ENVIRONMENT_FINALISE")
  RETURN
999 errorsexits("COMPUTATIONAL_ENVIRONMENT_FINALISE",err,error)
  RETURN 1
END SUBROUTINE COMPUTATIONAL_ENVIRONMENT_FINALISE
SUBROUTINE COMPUTATIONAL_ENVIRONMENT_INITIALISE(err,error,*)
  INTEGER(INTG), INTENT(OUT) :: ERR
  INTEGER(INTG) :: i,DUMMY_ERR,MPI_IERROR,RANK

  enters("COMPUTATIONAL_ENVIRONMENT_INITIALISE",err,error,*999)

  CALL mpi_init(mpi_ierror)
  ! ... (elided: query the communicator size and allocate the node array)
  IF(err /= 0) CALL flagerror("Could not allocate computational nodes",err,error,*999)
  ! ...
  CALL tau_profile_set_node(rank)   !TAU instrumentation: tag this process with its rank
  ! ...
  CALL COMPUTATIONAL_NODE_NUMBERS_SET(my_computational_node_number, &
    & number_computational_nodes,err,error,*999)
  ! ...

  exits("COMPUTATIONAL_ENVIRONMENT_INITIALISE")
  RETURN
998 errorsexits("COMPUTATIONAL_ENVIRONMENT_INITIALISE",err,error)
  RETURN 1
END SUBROUTINE COMPUTATIONAL_ENVIRONMENT_INITIALISE
FUNCTION COMPUTATIONAL_NODE_NUMBER_GET(err,error)
  INTEGER(INTG), INTENT(OUT) :: ERR
  INTEGER(INTG) :: COMPUTATIONAL_NODE_NUMBER_GET

  enters("COMPUTATIONAL_NODE_NUMBER_GET",err,error,*999)

  ! ... (elided: return MY_COMPUTATIONAL_NODE_NUMBER when the environment is initialised; otherwise)
  CALL flagerror("Computational environment not initialised",err,error,*999)

  exits("COMPUTATIONAL_NODE_NUMBER_GET")
  RETURN
999 errorsexits("COMPUTATIONAL_NODE_NUMBER_GET",err,error)
  RETURN
END FUNCTION COMPUTATIONAL_NODE_NUMBER_GET
FUNCTION COMPUTATIONAL_NODES_NUMBER_GET(err,error)
  INTEGER(INTG), INTENT(OUT) :: ERR
  INTEGER(INTG) :: COMPUTATIONAL_NODES_NUMBER_GET

  enters("COMPUTATIONAL_NODES_NUMBER_GET",err,error,*999)

  ! ... (elided: return the number of computational nodes when the environment is initialised; otherwise)
  CALL flagerror("Computational environment not initialised",err,error,*999)

  exits("COMPUTATIONAL_NODES_NUMBER_GET")
  RETURN
999 errorsexits("COMPUTATIONAL_NODES_NUMBER_GET",err,error)
  RETURN
END FUNCTION COMPUTATIONAL_NODES_NUMBER_GET
subroutine, public computational_work_group_create_start(WORLD_WORK_GROUP, NUMBER_COMPUTATIONAL_NODES, ERR, ERROR, *)
Create the highest level work group (Default: GROUP_WORLD)
subroutine, public enters(NAME, ERR, ERROR, *)
Records the entry into the named procedure and initialises the error code.
Contains information on the MPI type to transfer information about a computational node...
integer, parameter ptr
Pointer integer kind.
Contains information on the computational environment the program is running in.
This module is a CMISS buffer module to the PETSc library.
subroutine, public computational_work_group_create_finish(WORLD_WORK_GROUP, ERR, ERROR, *)
Generate the hierarchy computational environment based on work group tree.
subroutine, public computational_environment_finalise(ERR, ERROR, *)
Finalises the computational environment data structures and deallocates all memory.
Contains information on a cache hierarchy.
subroutine computational_node_initialise(COMPUTATIONAL_NODE, RANK, ERR, ERROR, *)
Initialises the computational node data structures.
subroutine computational_node_mpi_type_initialise(COMPUTATIONAL_NODE, ERR, ERROR, *)
Initialises the data structure containing the MPI type information for the COMPUTATIONAL_NODE_TYPE.
logical, save, public diagnostics2
.TRUE. if level 2 diagnostic output is active in the current routine
This module provides an iso_varying_string module, conformant to the API specified in ISO/IEC 1539-2:...
This module contains all program wide constants.
subroutine, public computational_work_group_subgroup_add(PARENT_WORK_GROUP, NUMBER_COMPUTATIONAL_NODES, ADDED_WORK_GROUP, ERR, ERROR, *)
Add a work sub-group to the parent group based on the computational requirements (called by the user)...
Contains information on logical working groups (added by Robert on 01/04/2010)
Contains information on a computational node containing a number of processors.
Pointer type to COMPUTATIONAL_WORK_GROUP_TYPE.
logical, save, public diagnostics3
.TRUE. if level 3 diagnostic output is active in the current routine
subroutine, public exits(NAME)
Records the exit out of the named procedure.
This module contains all type definitions in order to avoid cyclic module references.
This module contains all the low-level base routines e.g., all debug, control, and low-level communic...
subroutine, public computational_node_numbers_set(MY_NODE_NUMBER, NUMBER_OF_NODES, ERR, ERROR, *)
Set the computational node numbers. Note: this is done as a subroutine as COMPUTATIONAL_ENVIRONMENT d...
integer(intg) function, public computational_nodes_number_get(ERR, ERROR)
Returns the number of computational nodes.
subroutine computational_node_mpi_type_finalise(ERR, ERROR, *)
Finalises the data structure containing the MPI type information for the COMPUTATIONAL_NODE_TYPE.
This module contains all computational environment variables.
This module contains CMISS MPI routines.
integer(intg), save my_computational_node_number
The computational rank for this node.
recursive subroutine computational_workgroupgeneratecompenviron(WORK_GROUP, AVAILABLE_RANK_LIST, ERR, ERROR, *)
Generate the computational environment for the current level of the work group tree and all its subgroups recursive...
type(computational_environment_type), target, public computational_environment
The computational environment the program is running in.
subroutine computational_node_finalise(COMPUTATIONAL_NODE, ERR, ERROR, *)
Finalises the computational node data structures and deallocates all memory.
subroutine, public computational_environment_initialise(ERR, ERROR, *)
Initialises the computational environment data structures.
subroutine, public petsc_initialise(file, err, error, *)
Buffer routine to the PETSc PetscInitialize routine.
type(mpi_computational_node_type) mpi_computational_node_type_data
The MPI data on the computational nodes.
logical, save, public diagnostics1
.TRUE. if level 1 diagnostic output is active in the current routine
integer(intg), parameter, public diagnostic_output_type
Diagnostic output type.
subroutine, public petsc_finalise(err, error, *)
Buffer routine to the PETSc PetscFinalize routine.
Flags an error condition.
integer(intg) function, public computational_node_number_get(ERR, ERROR)
Returns the number/rank of the computational nodes.
This module contains all kind definitions.
subroutine, public mpi_error_check(ROUTINE, MPI_ERR_CODE, ERR, ERROR, *)
Checks to see if an MPI error has occurred during an MPI call and flags a CMISS error if it has...