用MPI写一个小的并行程序,想从root发送不同的数据到不同的slave上,然后在slave上生成不同的动态数组,然后将这些动态数组返回到root上。但是在root下接收的时候触发了断点。请各位指教,谢谢!
代码如下:
#include <mpi.h>
#include <iostream>
#include <time.h>
#include <stdlib.h>
int main(int argc, char** argv)
{
int firstBreakPt, lateralBreakPt;
//int reMatNum1, reMatNum2;
int tmpN;
int breakPt[3][2]={{3,5},{6,9},{4,7}};
int myid, numprocs;
MPI_Status status;
// double *reMat1;
// double *reMat2;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
tmpN = 15;
if (myid==0)
{
// send three parameters to slaves;
for (int i=1;i<numprocs;i++)
{
MPI_Send(&tmpN,1,MPI_INT,i,0,MPI_COMM_WORLD);
firstBreakPt = breakPt[i-1][0];
lateralBreakPt = breakPt[i-1][1];
//std::cout<<i<<" "<<breakPt[i-1][0] <<" "<<breakPt[i-1][1]<<std::endl;
MPI_Send(&firstBreakPt,1,MPI_INT,i,1,MPI_COMM_WORLD);
MPI_Send(&lateralBreakPt,1,MPI_INT,i,2,MPI_COMM_WORLD);
}
// receive arrays from slaves;
for (int m =1; m<numprocs; m++)
{
MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
if (status.MPI_TAG==3)
{
int nElems;
MPI_Get_elements(&status, MPI_DOUBLE, &nElems);
// Allocate buffer of appropriate size
double *result = new double[nElems];
MPI_Recv(result,nElems,MPI_DOUBLE,m,3,MPI_COMM_WORLD,&status);
std::cout<<std::endl<<"My id is: "<<m<<", my source is "<< status.MPI_SOURCE<<std::endl;
for (int ii =0; ii<nElems; ii++)
{
std::cout<<result[ii]<<std::endl;
}
//delete [] result;
}
else if (status.MPI_TAG==4)
{
int nElems;
MPI_Get_elements(&status, MPI_DOUBLE, &nElems);
// Allocate buffer of appropriate size
double *result = new double[nElems];
MPI_Recv(result,nElems,MPI_DOUBLE,m,4,MPI_COMM_WORLD,&status);
std::cout<<std::endl<<"My id is: "<<m<<", my source is "<< status.MPI_SOURCE<<std::endl; // 触发断点;
for (int ii =0; ii<nElems; ii++)
{
std::cout<<result[ii]<<std::endl;
}
//delete [] result;
}
}
}
else
{
// receive three paramters from master;
MPI_Recv(&tmpN,1,MPI_INT,0,0,MPI_COMM_WORLD,&status);
MPI_Recv(&firstBreakPt,1,MPI_INT,0,1,MPI_COMM_WORLD,&status);
MPI_Recv(&lateralBreakPt,1,MPI_INT,0,2,MPI_COMM_WORLD,&status);
// width
int width1 = (rand() % (tmpN-firstBreakPt+1))+ firstBreakPt;
int width2 = (rand() % (tmpN-lateralBreakPt+1))+ lateralBreakPt;
// create dynamic arrays
double *reMat1 = new double[width1*width1];
double *reMat2 = new double[width2*width2];
for (int n=0;n<width1; n++)
{
for (int j=0;j<width1; j++)
{
reMat1[n*width1+j]=(double)rand()/RAND_MAX + (double)rand()/(RAND_MAX*RAND_MAX);
//a[i*Width+j]=1.00;
}
}
for (int k=0;k<width2; k++)
{
for (int h=0;h<width2; h++)
{
reMat2[k*width2+h]=(double)rand()/RAND_MAX + (double)rand()/(RAND_MAX*RAND_MAX);
//a[i*Width+j]=1.00;
}
}
// send it back to master
MPI_Send(reMat1,width1*width1,MPI_DOUBLE,0,3,MPI_COMM_WORLD);
MPI_Send(reMat2,width2*width2,MPI_DOUBLE,0,4,MPI_COMM_WORLD);
}
MPI_Finalize();
return 0;
}