genetic/src/mpi_fit_MeX.f

#ifdef mpi_version
! worker main loop: receive fit jobs from rank 0 until mode=0 arrives
subroutine mpi_rest_control(micit,npar)
use mpi
implicit none
! global permanent data (only transferred once)
integer npar
integer mfit
integer micit
integer ma(npar)
integer ierror
integer i
integer mode
logical runner
integer status(MPI_STATUS_SIZE)
! do loop around this, checking for next fit or finish
call bcastact(ma,npar)
call MPI_Bcast(mfit, 1, MPI_INTEGER, 0, MPI_COMM_WORLD,ierror)
runner=.true.
do while(runner)
call MPI_Recv(mode, 1, MPI_INTEGER, 0, 69, MPI_COMM_WORLD,
$ status,ierror)
if(mode.ne.0) then
call mpi_fit_single_set(npar,mfit,micit,ma,mode)
else
runner=.false.
endif
end do
call MPI_Barrier(MPI_COMM_WORLD, ierror)
end
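!-----------------------------------------------
! Hedged sketch, not part of the original file: how rank 0 could
! release the workers from the receive loop above. The routine name
! stop_workers is hypothetical; mode=0 on tag 69 and the final
! barrier mirror mpi_rest_control.
subroutine stop_workers(numthreads)
use mpi
implicit none
integer numthreads, i, mode, ierror
mode = 0
do i = 1, numthreads-1
call MPI_Send(mode, 1, MPI_INTEGER, i, 69,
$ MPI_COMM_WORLD, ierror)
enddo
! matches the MPI_Barrier the workers reach after leaving their loop
call MPI_Barrier(MPI_COMM_WORLD, ierror)
end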
!-----------------------------------------------
c worker side: run a single fit of one parameter set and return it
subroutine mpi_fit_single_set(npar,mfit,micit,ma,nset)
use mpi
use dim_parameter, only: lbfgs
use marq_mod,only: mrqmin
use lbfgsb_mod,only: lbfgs_driver
implicit none
integer npar,mfit,micit,ierror,ma(*)
integer status(MPI_STATUS_SIZE), nset, my_rank
double precision par(npar), rms, startzeit, endzeit
startzeit = MPI_Wtime()
! receive data via blocking receive
call MPI_Recv(par, npar, MPI_DOUBLE_PRECISION, 0, 13,
$ MPI_COMM_WORLD, status, ierror)
call MPI_Recv(rms, 1, MPI_DOUBLE_PRECISION, 0, 14,
$ MPI_COMM_WORLD, status, ierror)
! select the optimizer configured in dim_parameter
if(lbfgs) then
call lbfgs_driver(par,npar,ma,mfit,
& rms,micit,nset)
else
call mrqmin(par,npar,ma,mfit,
& rms,micit,nset)
endif
call MPI_Comm_rank(MPI_COMM_WORLD, my_rank, ierror)
! send back data via blocking send
call MPI_Send(par, npar, MPI_DOUBLE_PRECISION, 0, 23,
$ MPI_COMM_WORLD, ierror)
call MPI_Send(rms, 1, MPI_DOUBLE_PRECISION, 0, 24, MPI_COMM_WORLD,
$ ierror)
endzeit = MPI_Wtime()
write(6,*) 'Thread', my_rank ,'time:', endzeit-startzeit
!> Write output for the specific set of parameters
! the factor converts the RMS from hartree to cm-1
write(6,99) nset, rms, rms*219474.69d0
99 format('Set:',i5,5x,'RMS:',g16.6,3x,'(',f16.6,' cm-1)')
end
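!-----------------------------------------------
! Hedged sketch, not part of the original file: the rank-0 mirror of
! the exchange above for a single worker and set, using the same tags
! (69 set index, 13/14 inputs, 23/24 results). workshare below does
! this asynchronously; this blocking variant is only illustrative and
! the routine name is hypothetical.
subroutine fit_one_set_blocking(worker,nset,par,rms,npar)
use mpi
implicit none
integer worker,nset,npar,ierror
integer status(MPI_STATUS_SIZE)
double precision par(npar),rms
call MPI_Send(nset, 1, MPI_INTEGER, worker, 69,
$ MPI_COMM_WORLD, ierror)
call MPI_Send(par, npar, MPI_DOUBLE_PRECISION, worker, 13,
$ MPI_COMM_WORLD, ierror)
call MPI_Send(rms, 1, MPI_DOUBLE_PRECISION, worker, 14,
$ MPI_COMM_WORLD, ierror)
call MPI_Recv(par, npar, MPI_DOUBLE_PRECISION, worker, 23,
$ MPI_COMM_WORLD, status, ierror)
call MPI_Recv(rms, 1, MPI_DOUBLE_PRECISION, worker, 24,
$ MPI_COMM_WORLD, status, ierror)
end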
!-----------------------------------------------
! broadcast the active-parameter mask from rank 0 to all ranks
subroutine bcastact(act,len)
use mpi
implicit none
integer len
integer act(len)
integer ierror
call MPI_Bcast(act, len, MPI_INTEGER, 0, MPI_COMM_WORLD,ierror)
end
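!-----------------------------------------------
! Hedged usage note: MPI_Bcast is collective, so rank 0 must issue
! the same two broadcasts the workers post at the top of
! mpi_rest_control. A bundled rank-0 helper could look like this
! (the name bcast_fit_setup is hypothetical):
subroutine bcast_fit_setup(ma,npar,mfit)
use mpi
implicit none
integer npar,mfit,ierror
integer ma(npar)
call bcastact(ma,npar)
call MPI_Bcast(mfit, 1, MPI_INTEGER, 0, MPI_COMM_WORLD,ierror)
end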
!-----------------------------------------------
! rank-0 scheduler: distribute nset parameter sets over numthreads-1
! workers, keeping every worker busy until all sets are fitted
subroutine workshare(numthreads, par, rms, npar, nset)
use mpi
implicit none
integer numthreads,ierror,nset,npar
double precision, asynchronous :: par(npar,nset),rms(nset)
logical working(numthreads-1)
logical sent,received_rms,received_par,received
! only element 1 of each 8-integer slot is used as the MPI request
! handle; the remaining storage appears to be padding
integer request_par(8,numthreads-1)
integer request_rms(8,numthreads-1)
integer ping(8)
integer nextworker
integer i,j,k
integer worksignal
integer status(MPI_STATUS_SIZE)
! init working array (one flag per worker, ranks 1..numthreads-1)
do i = 1,numthreads-1
working(i) = .false.
enddo
do i = 1,nset
! do a round of sending
sent=.false.
do while(.not.sent)
do j = 1,numthreads-1
if(.not.working(j)) then
working(j)=.true.
nextworker = j
sent=.true.
exit
endif
enddo
if(sent) then
! tag 69 carries the index of the set the worker should fit
call MPI_Issend(i, 1, MPI_INTEGER,
$ nextworker, 69, MPI_COMM_WORLD, ping(1), ierror)
call MPI_Issend(par(1,i), npar, MPI_DOUBLE_PRECISION,
$ nextworker, 13, MPI_COMM_WORLD, request_par(1
$ ,nextworker), ierror)
call MPI_Issend(rms(i), 1, MPI_DOUBLE_PRECISION,
$ nextworker, 14, MPI_COMM_WORLD, request_rms(1
$ ,nextworker), ierror)
! wait for the Issends to complete before reusing the buffers
! (Hannes' initial placement put these waits later, so the sends
! could overlap with finding the next free worker)
call MPI_Wait(ping(1), status, ierror)
call MPI_Wait(request_par(1,nextworker), status, ierror)
call MPI_Wait(request_rms(1,nextworker), status, ierror)
call MPI_Irecv(par(1,i), npar, MPI_DOUBLE_PRECISION,
$ nextworker, 23, MPI_COMM_WORLD, request_par(1
$ ,nextworker) , ierror)
call MPI_Irecv(rms(i), 1, MPI_DOUBLE_PRECISION,
$ nextworker, 24, MPI_COMM_WORLD, request_rms(1
$ ,nextworker), ierror)
endif
! check finished workers
do j = 1,numthreads-1
if(working(j)) then
received_par=.false.
received_rms=.false.
call MPI_Test(request_par(1,j), received_par,
$ status, ierror)
call MPI_Test(request_rms(1,j), received_rms,
$ status, ierror)
if(received_par.and.received_rms) then
working(j) = .false.
endif
endif
enddo
enddo
enddo
! drain the receives still outstanding after the last set was sent
received = .false.
do while(.not.received)
do j = 1,numthreads-1
if(working(j)) then
received_par=.false.
received_rms=.false.
call MPI_Test(request_par(1,j), received_par,
$ MPI_STATUS_IGNORE, ierror)
call MPI_Test(request_rms(1,j), received_rms,
$ MPI_STATUS_IGNORE, ierror)
if(received_par.and.received_rms) then
working(j) = .false.
endif
endif
enddo
received=.true.
do j = 1,numthreads-1
if(working(j)) then
received = .false.
exit
endif
enddo
enddo
end
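!-----------------------------------------------
! Hedged sketch, not part of the original file: one possible rank-0
! driver tying the routines above together for a single generation.
! Only bcastact/workshare and the tag conventions come from this
! file; the routine name and the stop_workers helper sketched after
! mpi_rest_control are hypothetical.
subroutine example_master(numthreads,npar,nset,mfit,ma,par,rms)
use mpi
implicit none
integer numthreads,npar,nset,mfit,ierror
integer ma(npar)
double precision par(npar,nset),rms(nset)
! one-time setup: broadcast the active-parameter mask and mfit
call bcastact(ma,npar)
call MPI_Bcast(mfit, 1, MPI_INTEGER, 0, MPI_COMM_WORLD,ierror)
! farm the nset parameter sets out to the worker ranks
call workshare(numthreads,par,rms,npar,nset)
! release the workers from their receive loop (mode=0 on tag 69)
call stop_workers(numthreads)
end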
#endif