[Parallel and Distributed Programming] Setting up an MPI development environment in Xcode & implementing Hello World

  1. Running Open MPI on macOS

  2. brew install mpich

  3. Setting conditional compilation parameters in Xcode (a small sketch follows)
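
  A minimal sketch of what such a conditional-compilation guard can look like, assuming a project-defined macro named USE_MPI set under Build Settings → Preprocessor Macros (the macro name is my own choice, not from the original post); the same source file then builds both with and without MPI:

    #include <stdio.h>
    
    #ifdef USE_MPI
    #include <mpi.h>
    #endif
    
    int main(int argc, char * argv[]) {
        int rank = 0, size = 1;   /* defaults for the non-MPI build */
    #ifdef USE_MPI
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
    #endif
        printf("process %d of %d\n", rank, size);
    #ifdef USE_MPI
        MPI_Finalize();
    #endif
        return 0;
    }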

  4. Debugging and Running MPI in Xcode. Pay close attention to the project settings described there; they are very useful.

    #include <iostream>
    #include <mpi.h>
    
    // C
    //int main(int argc, char * argv[]){
    //    int my_id, num_procs;
    //    int name_len;
    //    char processor_name[MPI_MAX_PROCESSOR_NAME];
    //    MPI_Init(&argc, &argv);
    //    MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
    //    MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
    //    MPI_Get_processor_name(processor_name, &name_len);
    //    printf("hello world! Process %d of %d on %s\n", my_id, num_procs, processor_name);
    //
    //    MPI_Finalize();
    //}
    
    // C++ (uses the MPI C++ bindings; note that these were deprecated in
    // MPI-2.2 and removed in MPI-3.0, so newer MPICH/Open MPI builds may not
    // provide them, in which case call the C API shown above from C++ instead)
    int main(int argc, char * argv[]) {
        int rank, size, name_len;
        char processor_name[MPI_MAX_PROCESSOR_NAME];
        MPI::Init(argc, argv);
        rank = MPI::COMM_WORLD.Get_rank();
        size = MPI::COMM_WORLD.Get_size();
        // Unlike the C API, where the name length is returned through a pointer,
        // the C++ binding takes it by reference
        MPI::Get_processor_name(processor_name, name_len);
        
        std::cout<<"Greetings from process "<<rank<<" of "<<size<<" on "<<processor_name<<"\n";
        MPI::Finalize();
        return 0;
    }
    
  5. Parallel computing with MPI on Mac OS X

  6. To run with mpirun from inside Xcode, make a copy of the mpirun binary in the MPI installation folder (e.g. as mpiruncopy) so that it can be selected as the scheme's executable (rough sketch below)
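
  A rough sketch of the scheme setup this refers to, assuming the copied binary is named mpiruncopy; the exact menu labels and the build-setting variables in the argument line are assumptions and may vary between Xcode versions:

    Product → Scheme → Edit Scheme… → Run → Info → Executable: mpiruncopy
    Product → Scheme → Edit Scheme… → Run → Arguments → Arguments Passed On Launch:
        -np 2 $(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_NAME)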

  7. Using MPI_Send and MPI_Recv, MPI_ANY_TAG, MPI_ANY_SOURCE

    #include <stdio.h>
    #include <string.h>
    #include <mpi.h>
    
    // C: simple point-to-point message from rank 0 to rank 1.
    // Run with at least two processes, e.g. mpirun -np 2 ./a.out
    int main(int argc, char * argv[]){
        int my_id;
        int name_len;
        char message[20];
        MPI_Status status;
        char processor_name[MPI_MAX_PROCESSOR_NAME];
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
        MPI_Get_processor_name(processor_name, &name_len);
        if (my_id == 0) {
            strcpy(message, "Hello Process 1");
            printf("Process 0 on %s sends %s\n", processor_name, message);
            MPI_Send(message, 20, MPI_CHAR, 1, 99, MPI_COMM_WORLD);
        }
        else if (my_id == 1) {
            MPI_Recv(message, 20, MPI_CHAR, 0, 99, MPI_COMM_WORLD, &status);
            printf("Process 1 on %s received %s\n", processor_name, message);
        }
        MPI_Finalize();
        return 0;
    }
    
    #include <stdio.h>
    #include <mpi.h>
    
    // Rank 0 collects one message from every other rank using the
    // MPI_ANY_SOURCE and MPI_ANY_TAG wildcards, then reads the actual
    // source and tag back out of the MPI_Status.
    int main(int argc, char * argv[]){
        int rank, size, buf[1];
        MPI_Status status;
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        if (rank == 0) {
            for (int i = 0; i < (size - 1); i++) {
                MPI_Recv(buf, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
                printf("Msg=%d from %d with tag %d\n", buf[0], status.MPI_SOURCE, status.MPI_TAG);
            }
        }
        else {
            buf[0] = rank;
            MPI_Send(buf, 1, MPI_INT, 0, 100-rank, MPI_COMM_WORLD);
        }
        MPI_Finalize();
        return 0;
    }
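
  When receiving with MPI_ANY_SOURCE / MPI_ANY_TAG, the MPI_Status also records how much data actually arrived. A minimal sketch along the lines of the example above (the buffer size and variable names are my own), using MPI_Get_count to read the element count back out of the status:

    #include <stdio.h>
    #include <mpi.h>
    
    int main(int argc, char * argv[]){
        int rank, size, count;
        int buf[4];
        MPI_Status status;
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        if (rank == 0) {
            // Accept up to 4 ints from any sender with any tag, then ask the
            // status how many ints were actually delivered.
            for (int i = 0; i < (size - 1); i++) {
                MPI_Recv(buf, 4, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
                MPI_Get_count(&status, MPI_INT, &count);
                printf("%d int(s) from %d with tag %d\n", count, status.MPI_SOURCE, status.MPI_TAG);
            }
        }
        else {
            // Every non-zero rank sends a different number of ints (1 or 2 here).
            buf[0] = rank; buf[1] = rank;
            MPI_Send(buf, 1 + (rank % 2), MPI_INT, 0, rank, MPI_COMM_WORLD);
        }
        MPI_Finalize();
        return 0;
    }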
    

Reposted from blog.csdn.net/weixin_40996518/article/details/106247508