2016-11-15 2 views
0

do 루프에서 데이터를 보내고 받는 데 문제가 있습니다. do 루프 안에서 데이터를 보내고 받으려면 어떻게 해야 합니까?

include 'mpif.h' 
    parameter (NRA = 4) 
    parameter (NCA = 4) 
    parameter (MASTER = 0) 
    parameter (FROM_MASTER = 1) 
    parameter (FROM_WORKER = 2) 

    integer numtasks,taskid,numworkers,source,dest,mtype, 
&   cols,avecol,extra, offset,i,j,k,ierr,rc 
    integer status(MPI_STATUS_SIZE) 
    real*8 a(NRA,NCA) 

    call MPI_INIT(ierr) 
    call MPI_COMM_RANK(MPI_COMM_WORLD, taskid, ierr) 
    call MPI_COMM_SIZE(MPI_COMM_WORLD, numtasks, ierr) 
    numworkers = numtasks-1 
    print *, 'task ID= ',taskid 
C *************************** master task ************************************* 
    if (taskid .eq. MASTER) then 
    if (numworkers .NE. 2) then 
    print *, 'Please use 3 processors' 
    print *,'Quitting...' 
    call MPI_ABORT(MPI_COMM_WORLD,rc,ierr) 
    endif 
C  Initialize A and B 
    do 30 i=1, NRA 
     do 30 j=1, NCA 
     a(i,j) = (i-1)+(j-1) 
30  continue 
C  Send matrix data to the worker tasks 
    avecol = NCA/numworkers 
    extra = mod(NCA,numworkers) 
    offset = 1 
    mtype = FROM_MASTER 
    do 50 dest=1, numworkers 
     if (dest .le. extra) then 
     cols = avecol + 1 
     else 
     cols = avecol 
     endif 
     write(*,*)' sending',cols,' cols to task',dest 
     call MPI_SEND(offset,1,MPI_INTEGER,dest,mtype, 
&     MPI_COMM_WORLD,ierr) 
     call MPI_SEND(cols,1,MPI_INTEGER,dest,mtype, 
&     MPI_COMM_WORLD,ierr) 
     call MPI_SEND(a(1,offset),cols*NRA,MPI_DOUBLE_PRECISION, 
&     dest,mtype,MPI_COMM_WORLD,ierr) 
     offset = offset + cols 
50  continue 
C  Receive results from worker tasks 
    mtype = FROM_WORKER 
    do 60 i=1, numworkers 
     source = i 
     call MPI_RECV(offset,1,MPI_INTEGER,source, 
&     mtype,MPI_COMM_WORLD,status,ierr) 
     call MPI_RECV(cols,1,MPI_INTEGER,source, 
&     mtype,MPI_COMM_WORLD,status,ierr) 
     call MPI_RECV(a(1,offset),cols*NRA,MPI_DOUBLE_PRECISION, 
&     source,mtype,MPI_COMM_WORLD,status,ierr) 
60  continue 
C  Print results 
    do 90 i=1, NRA 
     do 80 j = 1, NCA 
     write(*,70)a(i,j) 
70  format(2x,f8.2,$) 
80  continue 
     print *, ' ' 
90 continue 
    endif 
C *************************** worker task ************************************* 
    if (taskid > MASTER) then 
C  Receive matrix data from master task 
    mtype = FROM_MASTER 
    call MPI_RECV(offset,1,MPI_INTEGER,MASTER, 
&     mtype,MPI_COMM_WORLD,status,ierr) 
    call MPI_RECV(cols,1,MPI_INTEGER,MASTER, 
&     mtype,MPI_COMM_WORLD,status,ierr) 
    call MPI_RECV(a(1,offset),cols*NCA,MPI_DOUBLE_PRECISION,MASTER, 
&     mtype,MPI_COMM_WORLD,status,ierr) 
start0 = offset 
end0 = offset+cols-1 
C  Do matrix multiply 
    do t=1,5 
     do i=1, NRA 
     do j=start0,end0 
      a(i,j) = a(i,j)*t 
     enddo 
     enddo 
C  Send results back to master task 
    mtype = FROM_WORKER 
    call MPI_SEND(offset,1,MPI_INTEGER,MASTER,mtype, 
&     MPI_COMM_WORLD,ierr) 
    call MPI_SEND(cols,1,MPI_INTEGER,MASTER,mtype, 
&     MPI_COMM_WORLD,ierr) 
    call MPI_SEND(a(1,offset),cols*NCA,MPI_DOUBLE_PRECISION,MASTER, 
&     mtype,MPI_COMM_WORLD,ierr) 
enddo 
    endif 
    call MPI_FINALIZE(ierr) 
    end 

아래 코드를 확인해 주십시오. do 루프가 돌 때마다 행렬 a를 화면에 출력하고 싶습니다. 그런데 코드를 실행하면 do 루프의 첫 번째 반복(t = 1)에서 한 번만 출력됩니다. 행렬이 계산될 때마다 a가 화면에 출력되도록 이 코드를 어떻게 수정할 수 있습니까?

+0

스택 오버플로 환영합니다. 질문을 간결하게하십시오. 자신에 대한 소개를 피하고 사전에 인사와 감사를 피하십시오. 프로그램의 결과를 보여주고 원하는 것을 설명하는 것이 좋습니다. –

+0

@VladimirF 행렬 a는 [0 1 2 3; 1 2 3 4; 2 3 4 5; 3 4 5 6]입니다. 루프에서 1부터 5까지 곱해집니다. 결과가 1*a, 2*a, ..., 5*a와 같이 화면에 출력되기를 기대하고 있습니다. 그러나 현재는 1*a만 출력됩니다. –

답변

0

알겠습니다. 슬레이브에서 데이터를받는 동안 마스터에 루프를 넣어야합니다. 수정 된 코드

include 'mpif.h' 

    parameter (NRA = 4) 
    parameter (NCA = 4) 
    parameter (MASTER = 0) 
    parameter (FROM_MASTER = 1) 
    parameter (FROM_WORKER = 2) 

    integer numtasks,taskid,numworkers,source,dest,mtype, 
&   cols,avecol,extra, offset,i,j,k,ierr,rc 
    integer status(MPI_STATUS_SIZE) 
    real*8 a(NRA,NCA) 

    call MPI_INIT(ierr) 
    call MPI_COMM_RANK(MPI_COMM_WORLD, taskid, ierr) 
    call MPI_COMM_SIZE(MPI_COMM_WORLD, numtasks, ierr) 
    numworkers = numtasks-1 
    print *, 'task ID= ',taskid 

    C *************************** master task ************************************* 
    if (taskid .eq. MASTER) then 
    if (numworkers .NE. 2) then 
    print *, 'Please use 3 processors' 
    print *,'Quitting...' 
    call MPI_ABORT(MPI_COMM_WORLD,rc,ierr) 
    endif 
    C  Initialize A and B 
    do 30 i=1, NRA 
     do 30 j=1, NCA 
     a(i,j) = (i-1)+(j-1) 
30  continue 

C  Send matrix data to the worker tasks 
    avecol = NCA/numworkers 
    extra = mod(NCA,numworkers) 
    offset = 1 
    mtype = FROM_MASTER 
    do 50 dest=1, numworkers 
     if (dest .le. extra) then 
     cols = avecol + 1 
     else 
     cols = avecol 
     endif 
     write(*,*)' sending',cols,' cols to task',dest 
     call MPI_SEND(offset,1,MPI_INTEGER,dest,mtype, 
&     MPI_COMM_WORLD,ierr) 
     call MPI_SEND(cols,1,MPI_INTEGER,dest,mtype, 
&     MPI_COMM_WORLD,ierr) 
     call MPI_SEND(a(1,offset),cols*NRA,MPI_DOUBLE_PRECISION, 
&     dest,mtype,MPI_COMM_WORLD,ierr) 
     offset = offset + cols 
50  continue 

C  Receive results from worker tasks 
    do t = 1,5 
    mtype = FROM_WORKER 
    do 60 i=1, numworkers 
     source = i 
     call MPI_RECV(offset,1,MPI_INTEGER,source, 
&     mtype,MPI_COMM_WORLD,status,ierr) 
     call MPI_RECV(cols,1,MPI_INTEGER,source, 
&     mtype,MPI_COMM_WORLD,status,ierr) 
     call MPI_RECV(a(1,offset),cols*NRA,MPI_DOUBLE_PRECISION, 
&     source,mtype,MPI_COMM_WORLD,status,ierr) 
60  continue 
C  Print results 
    do 90 i=1, NRA 
     do 80 j = 1, NCA 
     write(*,70)a(i,j) 
70  format(2x,f8.2,$) 
80  continue 
     print *, ' ' 
90 continue 
    end do 


    endif 

C *************************** worker task ************************************* 
    if (taskid > MASTER) then 
C  Receive matrix data from master task 
    mtype = FROM_MASTER 
    call MPI_RECV(offset,1,MPI_INTEGER,MASTER, 
&     mtype,MPI_COMM_WORLD,status,ierr) 
    call MPI_RECV(cols,1,MPI_INTEGER,MASTER, 
&     mtype,MPI_COMM_WORLD,status,ierr) 
    call MPI_RECV(a(1,offset),cols*NCA,MPI_DOUBLE_PRECISION,MASTER, 
&     mtype,MPI_COMM_WORLD,status,ierr) 
start0 = offset 
end0 = offset+cols-1 

C  Do matrix multiply 
    do t=1,5 
     do i=1, NRA 
     do j=start0,end0 
      a(i,j) = a(i,j)*t 
     enddo 
     enddo  
C  Send results back to master task 
    mtype = FROM_WORKER 
    call MPI_SEND(offset,1,MPI_INTEGER,MASTER,mtype, 
&     MPI_COMM_WORLD,ierr) 
    call MPI_SEND(cols,1,MPI_INTEGER,MASTER,mtype, 
&     MPI_COMM_WORLD,ierr) 
    call MPI_SEND(a(1,offset),cols*NCA,MPI_DOUBLE_PRECISION,MASTER, 
&     mtype,MPI_COMM_WORLD,ierr) 
enddo 
    endif 
    call MPI_FINALIZE(ierr) 
    end 
관련 문제