OpenCMISS-Iron Internal API Documentation
computational_environment.f90
!>This module contains all computational environment variables.
MODULE comp_environment

  USE base_routines
  USE cmiss_mpi
  USE cmisspetsc
  USE constants
  USE kinds
#ifndef NOMPIMOD
  USE mpi
#endif
  USE input_output
  USE iso_varying_string

#include "macros.h"

  IMPLICIT NONE

#ifdef NOMPIMOD
#include "mpif.h"
#endif

  !Module parameters

  !Module types

  !>Pointer type to COMPUTATIONAL_WORK_GROUP_TYPE
  TYPE computational_work_group_ptr_type
    TYPE(computational_work_group_type), POINTER :: ptr !<A pointer to a computational work group
  END TYPE computational_work_group_ptr_type

  !>Contains information on logical working groups (added by Robert on 01/04/2010)
  TYPE computational_work_group_type
    INTEGER(INTG) :: number_computational_nodes !<The total number of computational nodes belonging to this group
    INTEGER(INTG) :: number_sub_work_groups !<The number of sub work groups
    TYPE(computational_work_group_type), POINTER :: parent !<The parent of this work group
    TYPE(computational_work_group_ptr_type), ALLOCATABLE :: sub_work_groups(:) !<Non-leaf node: the sub work groups

    TYPE(computational_environment_type), POINTER :: comp_env !<A pointer to the actual working environment
    LOGICAL :: comp_env_finished !<Is .TRUE. if the working environment has been generated, .FALSE. if not
  END TYPE computational_work_group_type

  !>Contains information on a cache hierarchy
  TYPE cache_type
    PRIVATE
    INTEGER(INTG) :: number_levels !<The number of levels in the cache hierarchy
    INTEGER(INTG), ALLOCATABLE :: size(:) !<size(level_idx). The size of the level_idx'th cache level
  END TYPE cache_type

  !>Contains information on a computational node containing a number of processors
  TYPE computational_node_type
    INTEGER(INTG) :: number_processors !<The number of processors for this computational node
    INTEGER(INTG) :: rank !<The MPI rank of this computational node
    !TYPE(cache_type) :: cache
    INTEGER(INTG) :: node_name_length !<The length of the name of this computational node
    CHARACTER(LEN=MPI_MAX_PROCESSOR_NAME) :: node_name !<The name of this computational node
  END TYPE computational_node_type

  !>Contains information on the MPI type used to transfer information about a computational node
  TYPE mpi_computational_node_type
    INTEGER(INTG) :: mpi_type !<The handle of the committed MPI data type
    INTEGER(INTG) :: num_blocks !<The number of blocks in the MPI data type (4)
    INTEGER(INTG) :: block_lengths(4) !<The length of each block
    INTEGER(INTG) :: types(4) !<The MPI data type of each block
    INTEGER(MPI_ADDRESS_KIND) :: displacements(4) !<The address displacement of each block
  END TYPE mpi_computational_node_type

  !>Contains information on the computational environment the program is running in
  TYPE computational_environment_type
    INTEGER(INTG) :: mpi_comm !<The MPI communicator for cmiss
    INTEGER(INTG) :: number_computational_nodes !<The number of computational nodes
    INTEGER(INTG) :: my_computational_node_number !<The rank of this process
    TYPE(computational_node_type), ALLOCATABLE :: computational_nodes(:) !<computational_nodes(node_idx). Information on the node_idx'th computational node
  END TYPE computational_environment_type

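The computational_work_group_type above forms a tree: each group holds an allocatable array of pointers to its subgroups and a pointer back to its parent. As an editorial illustration (a hypothetical helper, not part of the original module; it assumes it is added inside this module so the types and the INTG kind are in scope), a recursive walk over such a tree might look like this:

  !Editorial sketch (hypothetical helper, not in the original source):
  !sums NUMBER_COMPUTATIONAL_NODES over the leaves of a work-group tree.
  RECURSIVE FUNCTION work_group_leaf_node_count(group) RESULT(total)
    TYPE(computational_work_group_type), INTENT(IN) :: group
    INTEGER(INTG) :: total
    INTEGER(INTG) :: i
    IF(group%number_sub_work_groups==0) THEN
      total=group%number_computational_nodes !A leaf holds its own node count
    ELSE
      total=0
      DO i=1,group%number_sub_work_groups !Otherwise sum over the children
        total=total+work_group_leaf_node_count(group%sub_work_groups(i)%ptr)
      ENDDO !i
    ENDIF
  END FUNCTION work_group_leaf_node_count
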
  !Module variables

  TYPE(mpi_computational_node_type) :: mpi_computational_node_type_data !<The MPI data on the computational nodes
  TYPE(computational_environment_type), TARGET :: computational_environment !<The computational environment the program is running in

  !Interfaces
  !Access specifiers for subroutines and interfaces (if any)

  PUBLIC computational_work_group_type
  PUBLIC computational_environment_initialise,computational_environment_finalise
  PUBLIC computational_node_number_get,computational_nodes_number_get
  PUBLIC computational_work_group_create_start,computational_work_group_create_finish, &
    & computational_work_group_subgroup_add
  PUBLIC computational_environment

CONTAINS

  !
  !================================================================================================================================
  !

  !>Add the work sub-group to the parent group based on the computational requirements (called by the user)
  SUBROUTINE computational_work_group_subgroup_add(PARENT_WORK_GROUP,NUMBER_COMPUTATIONAL_NODES, &
    & ADDED_WORK_GROUP,ERR,ERROR,*)

    !Argument Variables
    TYPE(computational_work_group_type), POINTER, INTENT(INOUT) :: PARENT_WORK_GROUP !<The parent work group to add the new group to
    TYPE(computational_work_group_type), POINTER, INTENT(INOUT) :: ADDED_WORK_GROUP !<On return, the newly added work group
    INTEGER(INTG), INTENT(IN) :: NUMBER_COMPUTATIONAL_NODES !<The number of computational nodes requested for the new group
    INTEGER(INTG), INTENT(OUT) :: ERR !<The error code
    TYPE(varying_string), INTENT(OUT) :: ERROR !<The error string
    !Local Variables
    TYPE(computational_work_group_ptr_type) :: NEW_WORK_GROUP
    TYPE(computational_work_group_type), POINTER :: TMP_PARENT_WORK_GROUP
    TYPE(computational_work_group_ptr_type), ALLOCATABLE :: SUB_WORK_GROUPS(:)
    INTEGER(INTG) :: I

    ENTERS("COMPUTATIONAL_WORK_GROUP_SUBGROUP_ADD",err,error,*999)

    ALLOCATE(new_work_group%PTR)
    new_work_group%PTR%NUMBER_COMPUTATIONAL_NODES = number_computational_nodes
    new_work_group%PTR%NUMBER_SUB_WORK_GROUPS = 0

    IF(ASSOCIATED(parent_work_group)) THEN
      !Grow the parent's subgroup list by one and append the new group
      ALLOCATE(sub_work_groups(parent_work_group%NUMBER_SUB_WORK_GROUPS+1))
      DO i=1,parent_work_group%NUMBER_SUB_WORK_GROUPS
        sub_work_groups(i)%PTR=>parent_work_group%SUB_WORK_GROUPS(i)%PTR
      ENDDO !i
      !SUB_WORK_GROUPS(1:PARENT_WORK_GROUP%NUMBER_SUB_WORK_GROUPS)=>PARENT_WORK_GROUP%SUB_WORK_GROUPS(:)
      IF(ALLOCATED(parent_work_group%SUB_WORK_GROUPS)) THEN
        DEALLOCATE(parent_work_group%SUB_WORK_GROUPS)
      ENDIF
      sub_work_groups(1+parent_work_group%NUMBER_SUB_WORK_GROUPS)%PTR=>new_work_group%PTR
      ALLOCATE(parent_work_group%SUB_WORK_GROUPS(SIZE(sub_work_groups,1)))
      DO i=1,SIZE(sub_work_groups,1)
        parent_work_group%SUB_WORK_GROUPS(i)%PTR=>sub_work_groups(i)%PTR
      ENDDO !i
      !PARENT_WORK_GROUP%SUB_WORK_GROUPS(:)=>SUB_WORK_GROUPS(:)
      DEALLOCATE(sub_work_groups)
      parent_work_group%NUMBER_SUB_WORK_GROUPS = 1+parent_work_group%NUMBER_SUB_WORK_GROUPS
      new_work_group%PTR%PARENT=>parent_work_group
      !Update the computational node count of all the new group's ancestors
      tmp_parent_work_group=>parent_work_group
      DO WHILE(ASSOCIATED(tmp_parent_work_group))
        tmp_parent_work_group%NUMBER_COMPUTATIONAL_NODES = tmp_parent_work_group%NUMBER_COMPUTATIONAL_NODES &
          & + new_work_group%PTR%NUMBER_COMPUTATIONAL_NODES
        tmp_parent_work_group=>tmp_parent_work_group%PARENT
      ENDDO
    ELSE !Top level group
      CALL flagerror('PARENT_WORK_GROUP is not associated, call COMPUTATIONAL_WORK_GROUP_CREATE_START first', &
        & err,error,*999)
    ENDIF
    added_work_group=>new_work_group%PTR

    EXITS("COMPUTATIONAL_WORK_GROUP_SUBGROUP_ADD")
    RETURN
999 ERRORSEXITS("COMPUTATIONAL_WORK_GROUP_SUBGROUP_ADD",err,error)
    RETURN 1
  END SUBROUTINE computational_work_group_subgroup_add

  !
  !================================================================================================================================
  !

  !>Create the highest level work group (default: GROUP_WORLD)
  SUBROUTINE computational_work_group_create_start(WORLD_WORK_GROUP,NUMBER_COMPUTATIONAL_NODES,ERR,ERROR,*)

    !Argument Variables
    TYPE(computational_work_group_type), POINTER, INTENT(INOUT) :: WORLD_WORK_GROUP !<On return, the newly created world work group
    INTEGER(INTG), INTENT(IN) :: NUMBER_COMPUTATIONAL_NODES !<The number of computational nodes in the world work group
    INTEGER(INTG), INTENT(OUT) :: ERR !<The error code
    TYPE(varying_string), INTENT(OUT) :: ERROR !<The error string
    !Local Variables
    TYPE(computational_work_group_ptr_type) :: NEW_WORK_GROUP

    ENTERS("COMPUTATIONAL_WORK_GROUP_CREATE_START",err,error,*999)

    IF(ASSOCIATED(world_work_group)) THEN
      CALL flagerror('WORLD_WORK_GROUP is already associated',err,error,*999)
    ELSE
      ALLOCATE(new_work_group%PTR)
      new_work_group%PTR%NUMBER_COMPUTATIONAL_NODES = number_computational_nodes
      new_work_group%PTR%NUMBER_SUB_WORK_GROUPS = 0
      NULLIFY(new_work_group%PTR%PARENT) !It is already the highest level work group
      NULLIFY(new_work_group%PTR%COMP_ENV) !Generated later in COMPUTATIONAL_WORK_GROUP_CREATE_FINISH
      world_work_group=>new_work_group%PTR
    ENDIF

    EXITS("COMPUTATIONAL_WORK_GROUP_CREATE_START")
    RETURN
999 ERRORSEXITS("COMPUTATIONAL_WORK_GROUP_CREATE_START",err,error)
    RETURN 1
  END SUBROUTINE computational_work_group_create_start

  !
  !================================================================================================================================
  !

  !>Generate the computational environment for the current level of the work group tree and all its subgroups, recursively
  RECURSIVE SUBROUTINE computational_workgroupgeneratecompenviron(WORK_GROUP,AVAILABLE_RANK_LIST,ERR,ERROR,*)

    !Argument Variables
    TYPE(computational_work_group_type), POINTER, INTENT(INOUT) :: WORK_GROUP !<The work group to generate the environment for
    INTEGER(INTG), ALLOCATABLE, INTENT(INOUT) :: AVAILABLE_RANK_LIST(:) !<The list of ranks not yet assigned to a work group
    INTEGER(INTG), INTENT(OUT) :: ERR !<The error code
    TYPE(varying_string), INTENT(OUT) :: ERROR !<The error string
    !Local Variables
    INTEGER(INTG) :: I,MPI_IERROR,RANK,ORIGINAL_GROUP,NEW_GROUP
    INTEGER(INTG), ALLOCATABLE :: NEW_AVAILABLE_RANK_LIST(:)

    ENTERS("Computational_WorkGroupGenerateCompEnviron",err,error,*999)

    ALLOCATE(work_group%COMP_ENV)

    !Set the number of computational nodes in this communicator
    work_group%COMP_ENV%NUMBER_COMPUTATIONAL_NODES = work_group%NUMBER_COMPUTATIONAL_NODES

    !Determine my process's rank
    CALL mpi_comm_rank(computational_environment%MPI_COMM,rank,mpi_ierror)
    CALL mpi_error_check("MPI_COMM_RANK",mpi_ierror,err,error,*999)
    work_group%COMP_ENV%MY_COMPUTATIONAL_NODE_NUMBER=rank

    !Fill in the information for every computational node in this group
    ALLOCATE(work_group%COMP_ENV%COMPUTATIONAL_NODES(work_group%COMP_ENV%NUMBER_COMPUTATIONAL_NODES))
    IF(SIZE(available_rank_list,1)<work_group%COMP_ENV%NUMBER_COMPUTATIONAL_NODES) THEN
      CALL flagerror("Not enough available ranks for this work group",err,error,*999)
    ENDIF
    DO i=1,work_group%COMP_ENV%NUMBER_COMPUTATIONAL_NODES
      work_group%COMP_ENV%COMPUTATIONAL_NODES(i) = computational_environment%COMPUTATIONAL_NODES(available_rank_list(i))
    ENDDO !i
    !Note: after the loop i equals NUMBER_COMPUTATIONAL_NODES+1, so the first
    !i-1 entries of AVAILABLE_RANK_LIST are the ranks belonging to this group

    !Create a communicator for this group
    !CALL MPI_COMM_DUP(MPI_COMM_WORLD,WORK_GROUP%COMP_ENV%MPI_COMM,MPI_IERROR)
    CALL mpi_comm_group(mpi_comm_world,original_group,mpi_ierror)
    CALL mpi_error_check("MPI_COMM_GROUP",mpi_ierror,err,error,*999)
    CALL mpi_group_incl(original_group,i-1,available_rank_list(1:i-1),new_group,mpi_ierror) !Choose the first i-1 ranks
    CALL mpi_error_check("MPI_GROUP_INCL",mpi_ierror,err,error,*999)
    CALL mpi_comm_create(mpi_comm_world,new_group,work_group%COMP_ENV%MPI_COMM,mpi_ierror)
    CALL mpi_error_check("MPI_COMM_CREATE",mpi_ierror,err,error,*999)
    CALL mpi_group_free(original_group,mpi_ierror)
    CALL mpi_error_check("MPI_GROUP_FREE",mpi_ierror,err,error,*999)
    CALL mpi_group_free(new_group,mpi_ierror)
    CALL mpi_error_check("MPI_GROUP_FREE",mpi_ierror,err,error,*999)

    !Shrink AVAILABLE_RANK_LIST by the ranks just assigned
    ALLOCATE(new_available_rank_list(SIZE(available_rank_list,1)-work_group%COMP_ENV%NUMBER_COMPUTATIONAL_NODES))
    new_available_rank_list(1:SIZE(new_available_rank_list)) = available_rank_list(i:SIZE(available_rank_list,1))
    DEALLOCATE(available_rank_list)
    ALLOCATE(available_rank_list(SIZE(new_available_rank_list,1)))
    available_rank_list(:) = new_available_rank_list(:)
    DEALLOCATE(new_available_rank_list)

    work_group%COMP_ENV_FINISHED = .TRUE.

    !Recursively do the same for all subgroups
    DO i=1,work_group%NUMBER_SUB_WORK_GROUPS
      CALL computational_workgroupgeneratecompenviron(work_group%SUB_WORK_GROUPS(i)%PTR, &
        & available_rank_list,err,error,*999)
    ENDDO !i

    EXITS("Computational_WorkGroupGenerateCompEnviron")
    RETURN
999 ERRORSEXITS("Computational_WorkGroupGenerateCompEnviron",err,error)
    RETURN 1
  END SUBROUTINE computational_workgroupgeneratecompenviron

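The routine above uses the standard MPI recipe for carving a communicator out of a subset of ranks: extract the group of an existing communicator, include the chosen ranks, create the new communicator, then free the groups. A self-contained sketch of that recipe with plain MPI calls only (editorial, not part of this module):

!Editorial sketch (not in the original source): the subset-communicator
!recipe used above. Ranks outside the new group receive MPI_COMM_NULL
!from MPI_COMM_CREATE.
PROGRAM subset_communicator_example
  USE mpi
  IMPLICIT NONE
  INTEGER :: ierr,world_group,sub_group,sub_comm,number_of_ranks,i
  INTEGER, ALLOCATABLE :: ranks(:)

  CALL MPI_INIT(ierr)
  CALL MPI_COMM_SIZE(MPI_COMM_WORLD,number_of_ranks,ierr)
  !Choose the first half of the ranks (at least one)
  ALLOCATE(ranks(MAX(number_of_ranks/2,1)))
  ranks=(/ (i,i=0,SIZE(ranks)-1) /)
  !Extract the group, include the subset, create the communicator
  CALL MPI_COMM_GROUP(MPI_COMM_WORLD,world_group,ierr)
  CALL MPI_GROUP_INCL(world_group,SIZE(ranks),ranks,sub_group,ierr)
  CALL MPI_COMM_CREATE(MPI_COMM_WORLD,sub_group,sub_comm,ierr)
  !The groups are no longer needed once the communicator exists
  CALL MPI_GROUP_FREE(world_group,ierr)
  CALL MPI_GROUP_FREE(sub_group,ierr)
  IF(sub_comm/=MPI_COMM_NULL) CALL MPI_COMM_FREE(sub_comm,ierr)
  CALL MPI_FINALIZE(ierr)
END PROGRAM subset_communicator_example
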
  !
  !================================================================================================================================
  !

  !>Generate the hierarchical computational environment based on the work group tree.
  SUBROUTINE computational_work_group_create_finish(WORLD_WORK_GROUP,ERR,ERROR,*)

    !Argument Variables
    TYPE(computational_work_group_type), POINTER, INTENT(INOUT) :: WORLD_WORK_GROUP !<The world work group to finish
    INTEGER(INTG), INTENT(OUT) :: ERR !<The error code
    TYPE(varying_string), INTENT(OUT) :: ERROR !<The error string
    !Local Variables
    INTEGER(INTG), ALLOCATABLE :: AVAILABLE_RANK_LIST(:)
    INTEGER(INTG) :: I

    ENTERS("COMPUTATIONAL_WORK_GROUP_CREATE_FINISH",err,error,*999)

    !Set the computational environment of the world work group to the global
    !COMPUTATIONAL_ENVIRONMENT (the default communicator in OpenCMISS)
    world_work_group%COMP_ENV=>computational_environment
    world_work_group%COMP_ENV_FINISHED=.TRUE.

    !Generate the communicators for the subgroups, if any
    ALLOCATE(available_rank_list(world_work_group%COMP_ENV%NUMBER_COMPUTATIONAL_NODES))
    DO i=0,SIZE(available_rank_list,1)-1
      available_rank_list(i+1)=i
    ENDDO !i
    DO i=1,world_work_group%NUMBER_SUB_WORK_GROUPS
      CALL computational_workgroupgeneratecompenviron(world_work_group%SUB_WORK_GROUPS(i)%PTR,available_rank_list, &
        & err,error,*999)
    ENDDO !i

    EXITS("COMPUTATIONAL_WORK_GROUP_CREATE_FINISH")
    RETURN
999 ERRORSEXITS("COMPUTATIONAL_WORK_GROUP_CREATE_FINISH",err,error)
    RETURN 1
  END SUBROUTINE computational_work_group_create_finish

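Taken together, the three public work-group routines are intended to be called in a create-start / add-subgroups / create-finish sequence. An editorial usage sketch (not part of the module; the node counts are hypothetical and assume the program runs on at least four MPI ranks):

  !Editorial sketch (not in the original source): build a two-subgroup tree.
  SUBROUTINE example_build_work_groups(err,error,*)
    USE comp_environment
    USE kinds
    USE iso_varying_string
    IMPLICIT NONE
    INTEGER(INTG), INTENT(OUT) :: err !<The error code
    TYPE(varying_string), INTENT(OUT) :: error !<The error string
    TYPE(computational_work_group_type), POINTER :: world,left,right

    NULLIFY(world); NULLIFY(left); NULLIFY(right)
    !Create the top level (world) group
    CALL computational_work_group_create_start(world,4,err,error,*999)
    !Attach two subgroups of two computational nodes each; the ancestors'
    !node counts are updated by COMPUTATIONAL_WORK_GROUP_SUBGROUP_ADD
    CALL computational_work_group_subgroup_add(world,2,left,err,error,*999)
    CALL computational_work_group_subgroup_add(world,2,right,err,error,*999)
    !Generate a communicator for every group in the tree
    CALL computational_work_group_create_finish(world,err,error,*999)
    RETURN
999 RETURN 1
  END SUBROUTINE example_build_work_groups
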
  !
  !================================================================================================================================
  !

  !>Finalises the computational node data structures and deallocates all memory.
  SUBROUTINE computational_node_finalise(COMPUTATIONAL_NODE,ERR,ERROR,*)

    !Argument Variables
    TYPE(computational_node_type), INTENT(INOUT) :: COMPUTATIONAL_NODE !<The computational node to finalise
    INTEGER(INTG), INTENT(OUT) :: ERR !<The error code
    TYPE(varying_string), INTENT(OUT) :: ERROR !<The error string
    !Local Variables

    ENTERS("COMPUTATIONAL_NODE_FINALISE",err,error,*999)

    computational_node%NUMBER_PROCESSORS=0
    computational_node%RANK=-1
    computational_node%NODE_NAME_LENGTH=0
    computational_node%NODE_NAME=""

    EXITS("COMPUTATIONAL_NODE_FINALISE")
    RETURN
999 ERRORSEXITS("COMPUTATIONAL_NODE_FINALISE",err,error)
    RETURN 1
  END SUBROUTINE computational_node_finalise

  !
  !================================================================================================================================
  !

  !>Initialises the computational node data structures.
  SUBROUTINE computational_node_initialise(COMPUTATIONAL_NODE,RANK,ERR,ERROR,*)

    !Argument Variables
    TYPE(computational_node_type), INTENT(OUT) :: COMPUTATIONAL_NODE !<The computational node to initialise
    INTEGER(INTG), INTENT(IN) :: RANK !<The MPI rank of the computational node
    INTEGER(INTG), INTENT(OUT) :: ERR !<The error code
    TYPE(varying_string), INTENT(OUT) :: ERROR !<The error string
    !Local Variables
    INTEGER(INTG) :: MPI_IERROR

    ENTERS("COMPUTATIONAL_NODE_INITIALISE",err,error,*999)

!   COMPUTATIONAL_NODE%NUMBER_PROCESSORS=COMP_DETECT_NUMBER_PROCESSORS(ERR)
!   IF(ERR/=0) GOTO 999
    computational_node%NUMBER_PROCESSORS=1
    computational_node%RANK=rank
    CALL mpi_get_processor_name(computational_node%NODE_NAME,computational_node%NODE_NAME_LENGTH,mpi_ierror)
    CALL mpi_error_check("MPI_GET_PROCESSOR_NAME",mpi_ierror,err,error,*999)

    EXITS("COMPUTATIONAL_NODE_INITIALISE")
    RETURN
999 ERRORSEXITS("COMPUTATIONAL_NODE_INITIALISE",err,error)
    RETURN 1
  END SUBROUTINE computational_node_initialise

  !
  !================================================================================================================================
  !

  !>Finalises the data structure containing the MPI type information for the COMPUTATIONAL_NODE_TYPE.
  SUBROUTINE computational_node_mpi_type_finalise(ERR,ERROR,*)

    !Argument Variables
    INTEGER(INTG), INTENT(OUT) :: ERR !<The error code
    TYPE(varying_string), INTENT(OUT) :: ERROR !<The error string
    !Local Variables
    INTEGER(INTG) :: i,MPI_IERROR

    ENTERS("COMPUTATIONAL_NODE_MPI_TYPE_FINALISE",err,error,*999)

    DO i=1,mpi_computational_node_type_data%NUM_BLOCKS
      mpi_computational_node_type_data%TYPES(i)=0
      mpi_computational_node_type_data%BLOCK_LENGTHS(i)=0
      mpi_computational_node_type_data%DISPLACEMENTS(i)=0
    ENDDO !i
    mpi_computational_node_type_data%NUM_BLOCKS=0

    IF(mpi_computational_node_type_data%MPI_TYPE/=mpi_datatype_null) THEN
      CALL mpi_type_free(mpi_computational_node_type_data%MPI_TYPE,mpi_ierror)
      CALL mpi_error_check("MPI_TYPE_FREE",mpi_ierror,err,error,*999)
    ENDIF

    EXITS("COMPUTATIONAL_NODE_MPI_TYPE_FINALISE")
    RETURN
999 ERRORSEXITS("COMPUTATIONAL_NODE_MPI_TYPE_FINALISE",err,error)
    RETURN 1
  END SUBROUTINE computational_node_mpi_type_finalise

  !
  !================================================================================================================================
  !

  !>Initialises the data structure containing the MPI type information for the COMPUTATIONAL_NODE_TYPE.
  SUBROUTINE computational_node_mpi_type_initialise(COMPUTATIONAL_NODE,ERR,ERROR,*)

    !Argument Variables
    TYPE(computational_node_type), INTENT(IN) :: COMPUTATIONAL_NODE !<The computational node used as the layout template
    INTEGER(INTG), INTENT(OUT) :: ERR !<The error code
    TYPE(varying_string), INTENT(OUT) :: ERROR !<The error string
    !Local Variables
    INTEGER(INTG) :: I,MPI_IERROR

    ENTERS("COMPUTATIONAL_NODE_MPI_TYPE_INITIALISE",err,error,*999)

    mpi_computational_node_type_data%MPI_TYPE=mpi_datatype_null

    mpi_computational_node_type_data%NUM_BLOCKS=4
    mpi_computational_node_type_data%TYPES=(/mpi_integer,mpi_integer,mpi_integer,mpi_character/)
    mpi_computational_node_type_data%BLOCK_LENGTHS=(/1,1,1,mpi_max_processor_name/)

    CALL mpi_get_address(computational_node%NUMBER_PROCESSORS,mpi_computational_node_type_data%DISPLACEMENTS(1),mpi_ierror)
    CALL mpi_error_check("MPI_GET_ADDRESS",mpi_ierror,err,error,*999)
    CALL mpi_get_address(computational_node%RANK,mpi_computational_node_type_data%DISPLACEMENTS(2),mpi_ierror)
    CALL mpi_error_check("MPI_GET_ADDRESS",mpi_ierror,err,error,*999)
    CALL mpi_get_address(computational_node%NODE_NAME_LENGTH,mpi_computational_node_type_data%DISPLACEMENTS(3),mpi_ierror)
    CALL mpi_error_check("MPI_GET_ADDRESS",mpi_ierror,err,error,*999)
    !CPB 19/02/07 AIX compiler complains about the type of the first parameter i.e., the previous 3 have been integers
    !and this one is not, so cast the type.
    CALL mpi_get_address(computational_node%NODE_NAME,mpi_computational_node_type_data%DISPLACEMENTS(4),mpi_ierror)
    CALL mpi_error_check("MPI_GET_ADDRESS",mpi_ierror,err,error,*999)

    !Convert the absolute addresses to displacements relative to the first block
    DO i=4,1,-1
      mpi_computational_node_type_data%DISPLACEMENTS(i)=mpi_computational_node_type_data%DISPLACEMENTS(i)- &
        & mpi_computational_node_type_data%DISPLACEMENTS(1)
    ENDDO !i

    CALL mpi_type_create_struct(mpi_computational_node_type_data%NUM_BLOCKS,mpi_computational_node_type_data%BLOCK_LENGTHS, &
      & mpi_computational_node_type_data%DISPLACEMENTS,mpi_computational_node_type_data%TYPES, &
      & mpi_computational_node_type_data%MPI_TYPE,mpi_ierror)
    CALL mpi_error_check("MPI_TYPE_CREATE_STRUCT",mpi_ierror,err,error,*999)

    CALL mpi_type_commit(mpi_computational_node_type_data%MPI_TYPE,mpi_ierror)
    CALL mpi_error_check("MPI_TYPE_COMMIT",mpi_ierror,err,error,*999)

    IF(diagnostics3) THEN
      CALL write_string(diagnostic_output_type,"MPI Computational Node Type Data:",err,error,*999)
      CALL write_string_value(diagnostic_output_type,"  MPI type = ",mpi_computational_node_type_data%MPI_TYPE,err,error,*999)
      CALL write_string_value(diagnostic_output_type,"  Number blocks = ",mpi_computational_node_type_data%NUM_BLOCKS, &
        & err,error,*999)
      CALL write_string_vector(diagnostic_output_type,1,1,mpi_computational_node_type_data%NUM_BLOCKS,4,4, &
        & mpi_computational_node_type_data%TYPES,'("  Block types =",4(X,I15))','(15X,4(X,I15))',err,error,*999)
      CALL write_string_vector(diagnostic_output_type,1,1,mpi_computational_node_type_data%NUM_BLOCKS,8,8, &
        & mpi_computational_node_type_data%BLOCK_LENGTHS,'("  Block lengths =",8(X,I5))','(17X,8(X,I5))',err,error,*999)
      CALL write_string_vector(diagnostic_output_type,1,1,mpi_computational_node_type_data%NUM_BLOCKS,8,8, &
        & mpi_computational_node_type_data%DISPLACEMENTS,'("  Displacements =",8(X,I5))','(17X,8(X,I5))',err,error,*999)
    ENDIF

    EXITS("COMPUTATIONAL_NODE_MPI_TYPE_INITIALISE")
    RETURN
999 CALL computational_node_mpi_type_finalise(err,error,*998)
998 ERRORSEXITS("COMPUTATIONAL_NODE_MPI_TYPE_INITIALISE",err,error)
    RETURN 1
  END SUBROUTINE computational_node_mpi_type_initialise

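The routine above follows the canonical MPI derived-datatype recipe: record each member's address with MPI_GET_ADDRESS, subtract the first address so the displacements become relative, then create and commit the type with MPI_TYPE_CREATE_STRUCT. The same recipe applied to a minimal two-member type (editorial sketch, standard MPI calls only):

!Editorial sketch (not in the original source): the MPI_TYPE_CREATE_STRUCT
!recipe used above, applied to a minimal two-member type.
PROGRAM struct_datatype_example
  USE mpi
  IMPLICIT NONE
  TYPE node_info_type
    INTEGER :: rank
    CHARACTER(LEN=MPI_MAX_PROCESSOR_NAME) :: name
  END TYPE node_info_type
  TYPE(node_info_type) :: sample
  INTEGER :: ierr,i,node_info_mpi_type
  INTEGER :: block_lengths(2),types(2)
  INTEGER(MPI_ADDRESS_KIND) :: displacements(2)

  CALL MPI_INIT(ierr)
  block_lengths=(/1,MPI_MAX_PROCESSOR_NAME/)
  types=(/MPI_INTEGER,MPI_CHARACTER/)
  !Record the address of each member...
  CALL MPI_GET_ADDRESS(sample%rank,displacements(1),ierr)
  CALL MPI_GET_ADDRESS(sample%name,displacements(2),ierr)
  !...and make the displacements relative to the first member
  DO i=2,1,-1
    displacements(i)=displacements(i)-displacements(1)
  ENDDO !i
  CALL MPI_TYPE_CREATE_STRUCT(2,block_lengths,displacements,types,node_info_mpi_type,ierr)
  CALL MPI_TYPE_COMMIT(node_info_mpi_type,ierr)
  !node_info_mpi_type can now be used in sends, receives and gathers
  CALL MPI_TYPE_FREE(node_info_mpi_type,ierr)
  CALL MPI_FINALIZE(ierr)
END PROGRAM struct_datatype_example
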
  !
  !================================================================================================================================
  !

  !>Finalises the computational environment data structures and deallocates all memory.
  SUBROUTINE computational_environment_finalise(ERR,ERROR,*)

    !Argument Variables
    INTEGER(INTG), INTENT(OUT) :: ERR !<The error code
    TYPE(varying_string), INTENT(OUT) :: ERROR !<The error string
    !Local Variables
    INTEGER(INTG) :: COMPUTATIONAL_NODE,MPI_IERROR

    ENTERS("COMPUTATIONAL_ENVIRONMENT_FINALISE",err,error,*999)

    IF(ALLOCATED(computational_environment%COMPUTATIONAL_NODES)) THEN
      DO computational_node=0,computational_environment%NUMBER_COMPUTATIONAL_NODES-1
        CALL computational_node_finalise(computational_environment%COMPUTATIONAL_NODES(computational_node),err,error,*999)
      ENDDO !computational_node
      DEALLOCATE(computational_environment%COMPUTATIONAL_NODES)
    ENDIF
    computational_environment%NUMBER_COMPUTATIONAL_NODES=0

    CALL computational_node_mpi_type_finalise(err,error,*999)

    CALL mpi_comm_free(computational_environment%MPI_COMM,mpi_ierror)
    CALL mpi_error_check("MPI_COMM_FREE",mpi_ierror,err,error,*999)

    !Finalise PETSc. Call this after MPI_COMM_FREE as PETSc routines are
    !called when some MPI communicator attributes are freed.
    !CALL Petsc_LogView(PETSC_COMM_WORLD,"OpenCMISSTest.petsc",ERR,ERROR,*999)
    CALL petsc_finalise(err,error,*999)

    CALL mpi_finalize(mpi_ierror)
    CALL mpi_error_check("MPI_FINALIZE",mpi_ierror,err,error,*999)

    EXITS("COMPUTATIONAL_ENVIRONMENT_FINALISE")
    RETURN
999 ERRORSEXITS("COMPUTATIONAL_ENVIRONMENT_FINALISE",err,error)
    RETURN 1
  END SUBROUTINE computational_environment_finalise

  !
  !================================================================================================================================
  !

  !>Initialises the computational environment data structures.
  SUBROUTINE computational_environment_initialise(ERR,ERROR,*)

    !Argument Variables
    INTEGER(INTG), INTENT(OUT) :: ERR !<The error code
    TYPE(varying_string), INTENT(OUT) :: ERROR !<The error string
    !Local Variables
    INTEGER(INTG) :: i,DUMMY_ERR,MPI_IERROR,RANK
    TYPE(varying_string) :: DUMMY_ERROR

    ENTERS("COMPUTATIONAL_ENVIRONMENT_INITIALISE",err,error,*999)

    !Initialise the MPI environment
    CALL mpi_init(mpi_ierror)
    CALL mpi_error_check("MPI_INIT",mpi_ierror,err,error,*999)

    !Create a (private) communicator for cmiss. For now just duplicate MPI_COMM_WORLD
    CALL mpi_comm_dup(mpi_comm_world,computational_environment%MPI_COMM,mpi_ierror)
    CALL mpi_error_check("MPI_COMM_DUP",mpi_ierror,err,error,*999)

    !Determine the number of ranks/computational nodes we have in our computational environment
    CALL mpi_comm_size(computational_environment%MPI_COMM,computational_environment%NUMBER_COMPUTATIONAL_NODES,mpi_ierror)
    CALL mpi_error_check("MPI_COMM_SIZE",mpi_ierror,err,error,*999)

    !Allocate the computational node data structures
    ALLOCATE(computational_environment%COMPUTATIONAL_NODES(0:computational_environment%NUMBER_COMPUTATIONAL_NODES-1),STAT=err)
    IF(err/=0) CALL flagerror("Could not allocate computational nodes",err,error,*999)

    !Determine my process's rank
    CALL mpi_comm_rank(computational_environment%MPI_COMM,rank,mpi_ierror)
    CALL mpi_error_check("MPI_COMM_RANK",mpi_ierror,err,error,*999)
    computational_environment%MY_COMPUTATIONAL_NODE_NUMBER=rank

#ifdef TAUPROF
    CALL tau_profile_set_node(rank)
#endif

    !Create the MPI type information for the COMPUTATIONAL_NODE_TYPE
    CALL computational_node_mpi_type_initialise(computational_environment%COMPUTATIONAL_NODES(rank),err,error,*999)
    !Fill in the computational node data structure for this rank at the root position
    !(would be changed later with an allgather call)
    CALL computational_node_initialise(computational_environment%COMPUTATIONAL_NODES(0),rank,err,error,*999)

!   !Now transfer all the computational node information to the other computational nodes so that each rank has all the
!   !information.
!   CALL MPI_ALLGATHER(MPI_IN_PLACE,1,MPI_COMPUTATIONAL_NODE_TYPE_DATA%MPI_TYPE, &
!     & COMPUTATIONAL_ENVIRONMENT%COMPUTATIONAL_NODES(0),1,MPI_COMPUTATIONAL_NODE_TYPE_DATA%MPI_TYPE, &
!     & COMPUTATIONAL_ENVIRONMENT%MPI_COMM,MPI_IERROR)
!   CALL MPI_ERROR_CHECK("MPI_ALLGATHER",MPI_IERROR,ERR,ERROR,*999)

    !Initialise node numbers in base routines.
    CALL computational_node_numbers_set(computational_environment%MY_COMPUTATIONAL_NODE_NUMBER,computational_environment% &
      & NUMBER_COMPUTATIONAL_NODES,err,error,*999)

    !Initialise PETSc
    CALL petsc_initialise(petsc_null_character,err,error,*999)

    IF(diagnostics1) THEN
      !Just let the master node write out this information
      IF(rank==0) THEN
        CALL write_string(diagnostic_output_type,"Computational environment:",err,error,*999)
        CALL write_string_value(diagnostic_output_type,"  Number of computational nodes = ", &
          & computational_environment%NUMBER_COMPUTATIONAL_NODES,err,error,*999)
        CALL write_string_value(diagnostic_output_type,"  My computational node number = ", &
          & computational_environment%MY_COMPUTATIONAL_NODE_NUMBER,err,error,*999)
        IF(diagnostics2) THEN
          DO i=0,computational_environment%NUMBER_COMPUTATIONAL_NODES-1
            CALL write_string(diagnostic_output_type,"  Computational Node:",err,error,*999)
            CALL write_string_value(diagnostic_output_type,"    Number of Processors = ", &
              & computational_environment%COMPUTATIONAL_NODES(i)%NUMBER_PROCESSORS,err,error,*999)
            CALL write_string_value(diagnostic_output_type,"    MPI rank = ", &
              & computational_environment%COMPUTATIONAL_NODES(i)%RANK,err,error,*999)
            CALL write_string_value(diagnostic_output_type,"    Node Name = ", &
              & computational_environment%COMPUTATIONAL_NODES(i)%NODE_NAME,err,error,*999)
          ENDDO !i
        ENDIF
      ENDIF
    ENDIF

    EXITS("COMPUTATIONAL_ENVIRONMENT_INITIALISE")
    RETURN
999 CALL computational_environment_finalise(dummy_err,dummy_error,*998)
998 ERRORSEXITS("COMPUTATIONAL_ENVIRONMENT_INITIALISE",err,error)
    RETURN 1
  END SUBROUTINE computational_environment_initialise

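A caller typically brackets all parallel work with the initialise/finalise pair defined in this module. An editorial sketch of that lifecycle (not part of the source; assumes the module is compiled as comp_environment):

  !Editorial sketch (not in the original source): environment lifecycle.
  SUBROUTINE example_environment_lifecycle(err,error,*)
    USE comp_environment
    USE kinds
    USE iso_varying_string
    IMPLICIT NONE
    INTEGER(INTG), INTENT(OUT) :: err !<The error code
    TYPE(varying_string), INTENT(OUT) :: error !<The error string

    !Sets up MPI, the cmiss communicator, the node data structures and PETSc
    CALL computational_environment_initialise(err,error,*999)
    !... parallel work goes here ...
    !Tears down the node data structures, the communicator, PETSc and MPI
    CALL computational_environment_finalise(err,error,*999)
    RETURN
999 RETURN 1
  END SUBROUTINE example_environment_lifecycle
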
  !
  !================================================================================================================================
  !

  !>Returns the number/rank of this computational node.
  FUNCTION computational_node_number_get(ERR,ERROR)

    !Argument Variables
    INTEGER(INTG), INTENT(OUT) :: ERR !<The error code
    TYPE(varying_string), INTENT(OUT) :: ERROR !<The error string
    !Function variable
    INTEGER(INTG) :: COMPUTATIONAL_NODE_NUMBER_GET !<On exit, the rank of this computational node
    !Local Variables

    ENTERS("COMPUTATIONAL_NODE_NUMBER_GET",err,error,*999)

    IF(ALLOCATED(computational_environment%COMPUTATIONAL_NODES)) THEN
      computational_node_number_get=computational_environment%MY_COMPUTATIONAL_NODE_NUMBER
    ELSE
      CALL flagerror("Computational environment not initialised",err,error,*999)
    ENDIF

    EXITS("COMPUTATIONAL_NODE_NUMBER_GET")
    RETURN
999 ERRORSEXITS("COMPUTATIONAL_NODE_NUMBER_GET",err,error)
    RETURN
  END FUNCTION computational_node_number_get

  !
  !================================================================================================================================
  !

  !>Returns the number of computational nodes.
  FUNCTION computational_nodes_number_get(ERR,ERROR)

    !Argument Variables
    INTEGER(INTG), INTENT(OUT) :: ERR !<The error code
    TYPE(varying_string), INTENT(OUT) :: ERROR !<The error string
    !Function variable
    INTEGER(INTG) :: COMPUTATIONAL_NODES_NUMBER_GET !<On exit, the number of computational nodes
    !Local Variables

    ENTERS("COMPUTATIONAL_NODES_NUMBER_GET",err,error,*999)

    IF(ALLOCATED(computational_environment%COMPUTATIONAL_NODES)) THEN
      computational_nodes_number_get=computational_environment%NUMBER_COMPUTATIONAL_NODES
    ELSE
      CALL flagerror("Computational environment not initialised",err,error,*999)
    ENDIF

    EXITS("COMPUTATIONAL_NODES_NUMBER_GET")
    RETURN
999 ERRORSEXITS("COMPUTATIONAL_NODES_NUMBER_GET",err,error)
    RETURN
  END FUNCTION computational_nodes_number_get

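Unlike the subroutines in this module, the two getters report failure through ERR rather than an alternate return, so callers should test ERR explicitly. An editorial usage sketch:

  !Editorial sketch (not in the original source): querying rank and size.
  SUBROUTINE example_query_nodes(err,error,*)
    USE comp_environment
    USE kinds
    USE iso_varying_string
    IMPLICIT NONE
    INTEGER(INTG), INTENT(OUT) :: err !<The error code
    TYPE(varying_string), INTENT(OUT) :: error !<The error string
    INTEGER(INTG) :: my_rank,number_of_nodes

    my_rank=computational_node_number_get(err,error)
    IF(err/=0) RETURN 1 !The getters flag failure through ERR, not a label
    number_of_nodes=computational_nodes_number_get(err,error)
    IF(err/=0) RETURN 1
    WRITE(*,'("Rank ",I0," of ",I0)') my_rank,number_of_nodes
  END SUBROUTINE example_query_nodes
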
  !
  !================================================================================================================================
  !

END MODULE comp_environment