MS MPI error: unable to allocate launching block - C++

I created a simple console program in VS 2015, using MS MPI.
#include <stdio.h>
#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank = 0, size = 0;
    MPI_Init(&argc, &argv);                  /* starts MPI */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);    /* get current process id */
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (rank == 0)
    {
        char helloStr[] = "Hello World";
        // MPI_Send(helloStr, _countof(helloStr), MPI_CHAR, 1, 0, MPI_COMM_WORLD);
    }
    else if (rank == 1)
    {
        char helloStr[12];
        MPI_Recv(helloStr, _countof(helloStr), MPI_CHAR, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("Rank 1 received string %s from Rank 0\n", helloStr);
    }

    printf("hello from proccess rank %d from size %d\n", rank, size);

    MPI_Finalize();
    return 0;
}
This program compiles and executes. But when I run it with mpiexec -n 2 myprog.exe, I get the error: unable to allocate launching block.

Since you are using VS2015, I guess the reason is that your username contains non-ASCII characters.
Try to run mpiexec from a path that contains only ASCII characters.
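For example (C:\Temp below is just a placeholder for any directory whose full path is ASCII-only):
> copy myprog.exe C:\Temp
> cd /d C:\Temp
> mpiexec -n 2 myprog.exe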

Related

execvp error on file ./Fan (No such file or directory)

Here is my MPI_Bcast code first.
#include <iostream>
#include <mpi.h>
using namespace std;

int main(int argc, char* argv[])
{
    int rank;
    int data = 0;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    // means processors from 0 to 10
    if (rank == 0) {
        data = 10;
    }
    printf("\nbefore bcast data in Process %d : %d", rank, data);
    MPI_Bcast(&data, 1, MPI_INT, 0, MPI_COMM_WORLD);
    printf("\nafter bcast data in Process %d : %d", rank, data);
    MPI_Finalize();
    return 0;
}
After running, the output said: [proxy:0:0#condo038] HYDU_create_process (../../utils/launch/launch.c:825): execvp error on file ./Fan (No such file or directory).
[screenshot of the output]
Does anyone know what happened?
Is the code correct?
How do I get the expected output?

Cluster only runs with 7 or fewer tasks using MPI

I finally got my program working on the cluster using MPI, but once I scaled it past 7 tasks I get a segmentation fault. I went back to a very basic program, below, and I still get the segmentation fault past 7 cores. To debug, I allocated 8 cores on the cluster to work on directly rather than submitting a Slurm job. I get the issue with both the Intel compiler and GCC. I thought it would break at 8 processors, but it is 7, and I find that a bit weird. But then again, I find the whole thing weird. Any idea why the code breaks beyond a certain number of allocated cores (on the same node, might I add)?
On the cluster I use this:
$salloc -n 8
$enable_lmod
$module load icc/19 impi/19 libstdcxx/4
$mpiicpc -std=c++11 -o MPItest test.cpp
$mpiexec.hydra ./MPItest test
#include "mpi.h"
#include <stdio.h>
int main(int argc, char** argv)
{
int numtasks, rank, dest, source, rc, count, tag=1;
double inmsg, outmsg=20.0;
MPI_Status Stat;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 0)
{
printf("Number of processors: %d \n",numtasks);
for (int i=1; i<numtasks;i++)
{
dest = i;
outmsg+=outmsg;
rc = MPI_Send(&outmsg, 1, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
for (int i=1; i<numtasks;i++)
{
source = i;
rc = MPI_Recv(&inmsg, 1, MPI_DOUBLE, source, tag, MPI_COMM_WORLD, &Stat);
rc = MPI_Get_count(&Stat, MPI_DOUBLE, &count);
printf("Task %d: Received %d double(s), %f, from task %d with tag %d \n",
rank, count,inmsg, Stat.MPI_SOURCE, Stat.MPI_TAG);
}
}
else
{
dest = 0;
source = 0;
rc = MPI_Recv(&inmsg, 1, MPI_DOUBLE, source, tag, MPI_COMM_WORLD, &Stat);
rc = MPI_Get_count(&Stat, MPI_DOUBLE, &count);
printf("Task %d: Received %d double(s), %f, from task %d with tag %d \n",
rank, count,inmsg, Stat.MPI_SOURCE, Stat.MPI_TAG);
rc = MPI_Send(&inmsg, 1, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
}
MPI_Finalize();
return 0;
}

MPI causes breakpoint

I've made an application that counts the number of characters using MS MPI, but it hits 4 breakpoints at:
MPI_File_get_size, MPI_File_set_view, MPI_File_read, and again at MPI_File_get_size(fh, &size).
Do you know what may cause them? Full code below:
#include "stdafx.h"
#include "mpi.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stack>
using namespace std;
int main(int argc, char *argv[])
{
int numprocs, rank, buffer[100];
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Status status;
MPI_File fh;
MPI_Offset size;
int char_number;
const char plik[10] = "file.txt";
MPI_File_open(MPI_COMM_WORLD, plik, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
MPI_File_get_size(fh, &size);
MPI_File_set_view(fh, rank*(size / numprocs), MPI_CHAR, MPI_CHAR, "native", MPI_INFO_NULL);
MPI_File_read(fh, &buffer[100], 1, MPI_CHAR, &status);
char_number = MPI_File_get_size(fh, &size);
MPI_File_close(&fh);
if (rank == 0) {
for (int i = 0; i < numprocs; i++) {
MPI_Recv(&char_number, i, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
}
}
else {
MPI_Send(&char_number, 0, MPI_INT, 0, 3, MPI_COMM_WORLD);
}
MPI_Finalize();
return 0;
}
EDIT: I got rid of the breakpoints, but I'm not getting any output now:
#include "stdafx.h"
#include "mpi.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stack>
using namespace std;
int main(int argc, char *argv[])
{
int numprocs, rank;
char buffer[100] = { 0 };
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Status status;
MPI_File fh;
MPI_Offset size;
int char_number;
const char plik[10] = "file.txt";
MPI_File_open(MPI_COMM_WORLD, plik, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
MPI_File_get_size(fh, &size);
MPI_File_set_view(fh, rank*(size / numprocs), MPI_CHAR, MPI_CHAR, "native", MPI_INFO_NULL);
MPI_File_read(fh, buffer, (size/numprocs), MPI_CHAR, &status);
char_number = MPI_File_get_size(fh, &size);
MPI_File_close(&fh);
if (rank == 0) {
for (int i = 0; i < numprocs; i++) {
MPI_Recv(&char_number, i, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
}
cout << "There is: " << char_number << " characters in file.txt";
}
else {
MPI_Send(&char_number, 0, MPI_INT, 0, 3, MPI_COMM_WORLD);
}
MPI_Finalize();
return 0;
}
It might be that your breakpoints are still on the list:
I deleted a breakpoint, but I continue to hit it when I start
debugging again
If you deleted a breakpoint while debugging, in some
cases you may hit the breakpoint again the next time you start
debugging. To stop hitting this breakpoint, make sure all the
instances of the breakpoint are removed from the Breakpoints window.
Source: https://msdn.microsoft.com/en-us/library/5557y8b4.aspx
I remember a similar situation in the past with some executables; removing the breakpoint from the source code was not enough.
It also might be that you are trying to access a non-existent file.
It also might be that your code is not quite right.
#include "mpi.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stack>
using namespace std;
int main(int argc, char *argv[])
{
int numprocs, rank;
// buffer will keep input data
char buffer[100] = { 0 };
// initialization of MPI world
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Status status;
MPI_File fh;
MPI_Offset size;
int char_number;
// name of the file with data
const char plik[10] = "file.txt";
MPI_File_open(MPI_COMM_WORLD, plik, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
MPI_File_get_size(fh, &size);
// we have to be careful here
// in this sample we have strong assumption that size of data
// divides without the reminder by number of processors!
MPI_File_set_view(fh, rank*(size / numprocs), MPI_CHAR, MPI_CHAR, "native", MPI_INFO_NULL);
MPI_File_read(fh, buffer, (size/numprocs), MPI_CHAR, &status);
char_number = MPI_File_get_size(fh, &size);
MPI_File_close(&fh);
printf("My rank: %d - my data: %s\n", rank, buffer);
if (rank == 0) {
for (int i = 1; i < numprocs; i++) {
MPI_Recv(&buffer[i*(size / numprocs)], (size/numprocs), MPI_CHAR, i, 0, MPI_COMM_WORLD, &status);
}
printf("I have collected data: %s\n",buffer);
}
else {
MPI_Send(&buffer[0], rank*(size / numprocs), MPI_CHAR, 0, 0, MPI_COMM_WORLD);
}
MPI_Finalize();
return 0;
}
For input file
> cat file.txt
abcdefgh
works as expected:
> mpirun -np 2 ./simple2
My rank: 0 - my data: abcd
I have collected data: abcdefgh
My rank: 1 - my data: efgh
The question is whether this is what you are looking for.
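If what you ultimately want is just the total number of characters, here is a minimal sketch of that variant (my own addition, not part of your code): each rank counts the bytes it actually read via MPI_Get_count on the status from MPI_File_read, and the counts are summed on rank 0 with MPI_Reduce.
#include "mpi.h"
#include <stdio.h>

int main(int argc, char *argv[])
{
    int numprocs, rank;
    char buffer[100] = { 0 };   // assumes each rank's slice fits in 100 bytes

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);

    MPI_Status status;
    MPI_File fh;
    MPI_Offset size;

    MPI_File_open(MPI_COMM_WORLD, "file.txt", MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
    MPI_File_get_size(fh, &size);
    // same caveat as above: any remainder bytes (size % numprocs) are not read
    MPI_File_set_view(fh, rank * (size / numprocs), MPI_CHAR, MPI_CHAR, "native", MPI_INFO_NULL);
    MPI_File_read(fh, buffer, (int)(size / numprocs), MPI_CHAR, &status);
    MPI_File_close(&fh);

    // number of characters this rank actually read
    int local_count = 0;
    MPI_Get_count(&status, MPI_CHAR, &local_count);

    // sum all per-rank counts on rank 0
    int total_count = 0;
    MPI_Reduce(&local_count, &total_count, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0)
        printf("There are %d characters in file.txt\n", total_count);

    MPI_Finalize();
    return 0;
}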

Cannot use file I/O when running MPI

I have a problem with MPI: when I run my MPI program from wmpiexec I cannot do any file I/O, but when I run it from Visual Studio it works fine.
I tried the usual fopen() as well as the MPI I/O APIs; nothing works.
(The file already exists.)
/* This is an interactive version of cpi */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char *argv[])
{
    int namelen, numprocs, rank;
    char processor_name[MPI_MAX_PROCESSOR_NAME];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Get_processor_name(processor_name, &namelen);

    MPI_Status status;
    MPI_File fh;
    char x = 'x';

    if (rank == 0) {
        MPI_File_open(MPI_COMM_SELF, "test.txt", MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
        //FILE* f = fopen("test.txt","wb+");
        //if(f==NULL){
        //    printf("failed to open file\n");exit(1);
        //}
        for (int i = 0; i < 5; i++) {
            char buf[42];
            //fprintf(f,"%d \n",i);
            snprintf(buf, 42, "%d \n", i);
            MPI_File_read(fh, &x, sizeof(char), MPI_CHAR, &status);
            printf("%c", x);
        }
        getchar();
        // fclose(f);
        MPI_File_close(&fh);
    }
    else {
        // do nothing
    }

    MPI_Finalize();
    return 0;
}
The error code returned by the function MPI_File_open() can be tested to find out why opening the file failed. Indeed, as underlined in the documentation of this function:
For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error.
As a result, the default behavior of MPI_File_open() is not to report errors but to try to keep going, even if the file does not exist. Therefore, retrieving the value of the error code and tuning the error handler seems a promising way to investigate the issue. Here is a sample code based on yours doing so. It can be compiled with mpicc main.c -o main -std=c99 -Wall and run with mpirun -np 2 main.
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// A custom error handler
void file_errhandler_fn(MPI_File * file, int * errorcode, ... );

void file_errhandler_fn(MPI_File * file, int * errorcode, ... ){
    if(*errorcode!=MPI_SUCCESS){
        //your error handler here...
        char error[MPI_MAX_ERROR_STRING+1];
        int errorlen;
        MPI_Error_string(*errorcode, error, &errorlen);
        fprintf(stderr,"error handler: %s\n",error);
        fflush(stderr);
        //visible painful death, easily spotted
        exit(1);
    }
}

int main(int argc, char *argv[])
{
    int namelen, numprocs, rank;
    char processor_name[MPI_MAX_PROCESSOR_NAME];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Get_processor_name(processor_name, &namelen);

    MPI_Status status;
    MPI_File fh;

    //creating an error handler for the file, calling your custom function:
    MPI_Errhandler errhandler;
    MPI_File_create_errhandler(file_errhandler_fn, &errhandler);

    char x='x';
    if (rank == 0) {
        int errorcode=MPI_File_open(MPI_COMM_SELF, "test.txt", MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
        //in case the opening of the file went wrong, handle the error:
        if(errorcode!=MPI_SUCCESS){
            //your error handler here...
            char error[MPI_MAX_ERROR_STRING+1];
            int errorlen;
            MPI_Error_string(errorcode, error, &errorlen);
            fprintf(stderr,"opening file: %s\n",error);
            fflush(stderr);
            //visible painful death, easily spotted
            exit(1);
        }
        //setting the custom error handler for further operations
        errorcode=MPI_File_set_errhandler(fh,errhandler);

        //FILE* f = fopen("test.txt","wb+");
        //if(f==NULL){
        //    printf("failed to open file\n");exit(1);
        //}
        for (int i = 0; i < 5; i++) {
            char buf[42];
            //fprintf(f,"%d \n",i);
            snprintf(buf, 42, "%d \n", i);
            errorcode=MPI_File_read(fh,&x,sizeof(char), MPI_CHAR, &status);
            printf("index %d got %c",i,x);
            // the following line should compile fine, but triggers MPI_ERR_COUNT, captured by the custom error handler.
            //errorcode=MPI_File_write(fh, &i, -1, MPI_INT, MPI_STATUS_IGNORE);
        }
        getchar();
        // fclose(f);
        MPI_File_close(&fh);
    }
    else {
        // do nothing
    }

    MPI_Errhandler_free(&errhandler);
    MPI_Finalize();
    return 0;
}
Doing so may not fully solve your issue, but it provides a way to investigate it. What is the value of the error code? If it does not help you, let me know so I can delete my answer and leave your question unanswered!

MPI_Sendrecv_replace() dead-lock issue

I'm doing my homework with the following assignment:
Every process takes a double as input. Using the function
MPI_Sendrecv_replace(), swap all doubles with the process of opposite
rank (first & last, second & second-to-last, ...). In every process, output the received number.
So here is the code that I wrote.
#include "mpi.h"
#include <stdio.h>
#include "pt4.h"
int main(int argc, char *argv[])
{
MPI_Init(&argc,&argv);
int flag;
MPI_Initialized(&flag);
if (flag == 0)
return;
int rank, size;
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
double n;
pt >> n; // pt is a stream provided by side library (works perfectly fine)
int oppositeRank = (size - 1) - rank;
if (rank != oppositeRank)
{
MPI_Status status;
MPI_Sendrecv_replace(&n, 1, MPI_DOUBLE, oppositeRank, 0,
rank, 0, MPI_COMM_WORLD, &status);
}
pt << n;
MPI_Finalize();
return 0;
}
Although this code compiles without any problems, it never stops. So the question is why? What am I doing wrong?
Replace this:
MPI_Sendrecv_replace(&n, 1, MPI_DOUBLE, oppositeRank, 0,
rank, 0, MPI_COMM_WORLD, &status);
with this:
MPI_Sendrecv_replace(&n, 1, MPI_DOUBLE, oppositeRank, 0,
oppositeRank, 0, MPI_COMM_WORLD, &status);
You may find this documentation page useful.
This function sends the buffer to one rank (dest, the 4th argument) and receives from another (source, the 6th argument). To do a swap, you send to another rank and receive from that same rank. In your case you were sending to the opposite rank but receiving from yourself, and that message would never come, hence the deadlock.
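For completeness, here is a minimal self-contained sketch of the corrected swap (it simply uses the rank as the value instead of reading from the pt stream, which belongs to your course library):
#include "mpi.h"
#include <stdio.h>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    double n = (double)rank;   // stand-in for the value read from pt

    int oppositeRank = (size - 1) - rank;
    if (rank != oppositeRank)
    {
        MPI_Status status;
        // send to AND receive from the same opposite rank
        MPI_Sendrecv_replace(&n, 1, MPI_DOUBLE, oppositeRank, 0,
                             oppositeRank, 0, MPI_COMM_WORLD, &status);
    }

    printf("rank %d now holds %f\n", rank, n);

    MPI_Finalize();
    return 0;
}
Run with mpiexec -n 4, rank 0 should print 3, rank 1 should print 2, and so on.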