MPI_Gather分段错误

我有这段并行高斯消元代码。调用 MPI_Gather 函数时发生分段错误。我知道如果没有为某个缓冲区正确分配内存,就可能出现这样的错误,但我看不出内存管理代码有什么问题。

有人可以帮忙吗?

谢谢。

注意:程序从名为input.txt的同一目录中的.txt文件中读取。

代码:

 #include  #include  #include  #include "mpi.h" /*void print2dAddresses(double** array2d, int rows, int cols) { int i; for(i = 0; i < rows; i++) { int j; for(j = 0; j < cols; j++) { printf("%d ", &(array2d[i][j])); } printf("\n"); } printf("------------------------------------"); }*/ double** newMatrix(int rows, int cols) { double *data = (double*) malloc(rows * cols * sizeof(double)); double **array= (double **)malloc(rows * sizeof(double*)); int i; for (i=0; i<rows; i++) array[i] = &(data[cols*i]); return array; } void freeMatrix(double** mat) { free(mat[0]); free(mat); } double** new2dArray(int nrows, int ncols) { int i; double** array2d; array2d = (double**) malloc(nrows * sizeof(double*)); for(i = 0; i < nrows; i++) { array2d[i] = (double*) malloc(ncols * sizeof(double)); } return array2d; } double* new1dArray(int size) { return (double*) malloc(size * sizeof(double)); } void free2dArray(double** array2d, int nrows) { int i; for(i = 0; i < nrows; i++) { free(array2d[i]); } free(array2d); } void print2dArray(double** array2d, int nrows, int ncols) { int i, j; for(i = 0; i < nrows; i++) { for(j = 0; j < ncols; j++) { printf("%lf ", array2d[i][j]); } printf("\n"); } printf("----------------------\n"); } void print1dArray(double* array, int size) { int i; for(i = 0; i < size; i++) { printf("%lf\n", array[i]); } printf("----------------------\n"); } void read2dArray(FILE* fp, double** array2d, int nrows, int ncols) { int i, j; for(i = 0; i < nrows; i++) { for(j = 0; j < ncols; j++) { fscanf(fp, "%lf", &(array2d[i][j])); } } } void read1dArray(FILE* fp, double* array, int size) { int i; for(i = 0; i < size; i++) { fscanf(fp, "%lf", &(array[i])); } } void readSymbols(char* symbols, int size, FILE* fp) { int i; for(i = 0; i < size; i++) { char c = '\n'; while(c == '\n' | c == ' ' | c == '\t' | c == '\r') fscanf(fp, "%c", &c); symbols[i] = c; } } void printSolution(char* symbols, double* x, int size) { int i; for(i = 0; i < size; i++) { printf("%c = %lf\n", symbols[i], 
x[i]); } } double* copy_1d_array(double* original, int size) { double* copy_version; int i; copy_version = (double*) malloc(size * sizeof(double)); for(i = 0; i < size; i++) { copy_version[i] = original[i]; } return copy_version; } int main(int argc, char** argv) { int p, rank, i, j, k, l, msize, rowsPerProcess, remainder, startingRow, dest, rowCounter, remainingRows, neededProcesses; double **A, *b, *x, **smallA, *currentRow, *smallB, currentB, **receivedA, *receivedB; char *symbols; MPI_Status status; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &p); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if(rank == 0) { FILE* fp; fp = fopen("input.txt", "r"); fscanf(fp, "%d", &msize); A = newMatrix(msize, msize); b = new1dArray(msize); x = new1dArray(msize); symbols = (char*) malloc(msize * sizeof(char)); read2dArray(fp, A, msize, msize); read1dArray(fp, b, msize); readSymbols(symbols, msize, fp); fclose(fp); /*print2dArray(A, msize, msize); print1dArray(b, msize);*/ } MPI_Bcast(&msize, 1, MPI_INT, 0, MPI_COMM_WORLD); for(i = 0; i < (msize - 1); i++) { int maxIndex; double maxCoef, tmp, r; /*finding max row*/ if(rank == 0) { maxIndex = i; maxCoef = fabs(A[i][i]); for(j = i + 1; j  maxCoef) { maxCoef = A[j][i]; maxIndex = j; } } /*swapping the current row with the max row*/ for(j = 0; j < msize; j++) { tmp = A[i][j]; A[i][j] = A[maxIndex][j]; A[maxIndex][j] = tmp; } tmp = b[i]; b[i] = b[maxIndex]; b[maxIndex] = tmp; /*elimination*/ /*for(j = i + 1; j < msize; j++) { double r = A[j][i] / A[i][i]; subtracting r * row i from row j for(k = i; k < msize; k++) { A[j][k] -= r * A[i][k]; } b[j] -= r * b[i]; }*/ /*parallel elimination*/ startingRow = i + 1; neededProcesses = p; remainingRows = msize - startingRow; if(remainingRows < neededProcesses) { neededProcesses = remainingRows; } rowsPerProcess = remainingRows / neededProcesses; remainder = remainingRows % neededProcesses; } MPI_Bcast(&startingRow, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&rowsPerProcess, 1, MPI_INT, 0, 
MPI_COMM_WORLD); if(rank == 0) { currentRow = copy_1d_array(A[startingRow-1], msize); currentB = b[startingRow-1]; } else { currentRow = new1dArray(msize); } MPI_Bcast(currentRow, msize, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&currentB, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); if(rank == 0) { receivedA = newMatrix(remainingRows, msize); receivedB = new1dArray(remainingRows); } smallA = newMatrix(rowsPerProcess, msize); smallB = new1dArray(rowsPerProcess); MPI_Scatter(&(A[startingRow][0]), rowsPerProcess*msize, MPI_DOUBLE, &(smallA[0][0]), rowsPerProcess*msize, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Scatter(&(b[startingRow]), rowsPerProcess, MPI_DOUBLE, &(smallB[0]), rowsPerProcess, MPI_DOUBLE, 0, MPI_COMM_WORLD); for(j = 0; j < rowsPerProcess; j++) { r = smallA[j][startingRow-1] / currentRow[startingRow-1]; for(k = 0; k < msize; k++) { smallA[j][k] -= r * currentRow[k]; } smallB[j] -= r * currentB; } MPI_Gather(&(smallA[0][0]), rowsPerProcess*msize, MPI_DOUBLE, &(receivedA[0][0]), rowsPerProcess*msize, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Gather(&(smallB[0]), rowsPerProcess, MPI_DOUBLE, &(receivedB[0]), rowsPerProcess, MPI_DOUBLE, 0, MPI_COMM_WORLD); freeMatrix(smallA); free(smallB); if(rank == 0) { for(j = 0; j < remainingRows; j++) { for(k = 0; k  0) { for(j = (msize - remainder); j < msize; j++) { r = A[j][i] / A[i][i]; for(k = 0; k = 0; i--) { x[i] = b[i]; for(j = msize - 1; j > i; j--) { x[i] -= A[i][j] * x[j]; } x[i] /= A[i][i]; } printf("solution = \n"); //print1dArray(x, msize); printSolution(symbols, x, msize); freeMatrix(A); free(b); free(x); free(symbols); } MPI_Finalize(); return 0; } 

输入文件:

 3 1 1 1 1 1 3 2 1 4 4 9 12 x y z 

问题可能出在这里:在 rank != 0 的进程上求值 &(receivedA[0][0])。您正在索引一个尚未分配的数组。您可能需要另外创建一个指针,如下所示:

  if(rank == 0) { receivedA = newMatrix(remainingRows, msize); recievedAHead = &(receivedA[0][0]); receivedB = new1dArray(remainingRows); } else { recievedAHead = NULL; } 

并在MPI_Gather调用中使用recievedAHead。