Sample file hello_world.c

------------------------------------------
/*
 *	Hewlett-Packard Co., High Performance Systems Division
 *
 *	Function:	- example: simple "hello world"
 *
 *	$Revision: 1.1.2.1 $
 */

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
	int	rank, size, len;
	char	name[MPI_MAX_PROCESSOR_NAME];
	int	to_wait = 0, sleep_diff = 0, max_limit = 0;
	double	sleep_start = 0.0, sleep_now = 0.0;

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	
	MPI_Get_processor_name(name, &len);

	// optional first command-line argument: seconds to busy-wait
	if (argc > 1)
	{
		to_wait = atoi(argv[1]);
	}

	// busy loop for debugging needs
	if (to_wait)
	{
		sleep_start = MPI_Wtime();
		while (1)
		{
			max_limit++;
			if (max_limit > 100000000)
			{
				fprintf(stdout, "--------  exit loop, to_wait: %d\n", to_wait);
				break;
			}

			sleep_now = MPI_Wtime();
			sleep_diff = (int)(sleep_now - sleep_start);
			if (sleep_diff >= to_wait)
			{
				break;
			}
		}
	}

//	if (rank == 0)	// uncomment to print this message from rank 0 only
//	{
		printf("Hello world! I'm %d of %d on %s\n", rank, size, name);
//	}

	MPI_Finalize();
	return 0;
}

------------------------------------------
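
As an aside: the commented-out lines around the printf hint at printing
from the first rank only. Below is a minimal sketch of that idea (an
illustration, not part of the course example): rank 0 gathers all host
names with MPI_Gather and prints them itself, in rank order, so the
output lines do not interleave.
------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
	int	rank, size, len, i;
	char	name[MPI_MAX_PROCESSOR_NAME];
	char	*all = NULL;

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	MPI_Get_processor_name(name, &len);

	if (rank == 0)	/* the receive buffer is needed on the root only */
		all = malloc((size_t)size * MPI_MAX_PROCESSOR_NAME);

	/* collect every rank's host name on rank 0 */
	MPI_Gather(name, MPI_MAX_PROCESSOR_NAME, MPI_CHAR,
	           all,  MPI_MAX_PROCESSOR_NAME, MPI_CHAR,
	           0, MPI_COMM_WORLD);

	if (rank == 0)
	{
		for (i = 0; i < size; i++)
			printf("Hello world! I'm %d of %d on %s\n",
			       i, size, all + (size_t)i * MPI_MAX_PROCESSOR_NAME);
		free(all);
	}

	MPI_Finalize();
	return 0;
}
------------------------------------------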

compile command:
------------------------------------------
mpicc hello_world.c -o hello_world.ex
-----------------------------------------
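
Before submitting to the queue you can sanity-check the executable with
a small interactive run (assuming mpirun may be used this way on the
login node; the optional argument is the busy-wait time in seconds):
------------------------------------------
mpirun -np 4 ./hello_world.ex 2
------------------------------------------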

Submission script:

Script file mpi_pbs_class_24.sh
(this example uses my e-mail address and the course24 account)
------------------------------------------------------------------
#!/bin/sh
#
# job name (default is the name of the PBS script file); here: classtest_24
#---------------------------------------------------
#PBS -N classtest_24
#
# Submit the job to the queue "queue_name";
# for our example "queue_name" is all_l_p
#---------------------------------------------------
#PBS -q  all_l_p
#
# Send the mail messages (see below) to the specified user address 
#-----------------------------------------------------------------
#PBS -M phr76ja@tx.technion.ac.il
#
# send me mail when the job begins
#---------------------------------------------------
#PBS -mb
# send me mail when the job ends  
#---------------------------------------------------
#PBS -me
# send me mail when the job aborts (with an error)
#---------------------------------------------------
#PBS -ma
#
# Comment: if you want more than one message to be sent, you must group
# the flags on one line; otherwise only the last flag takes effect.
#-------------------------------------------------------------
#PBS -mbea
#
#
# resource limits: number and distribution of parallel processes 
#------------------------------------------------------------------ 
#PBS -l select=4:ncpus=12:mpiprocs=12 -l place=scatter
#
# comment: this select statement means: use M chunks (nodes), and
# run N MPI tasks on N (<= 12) CPUs on each of the M nodes;
# for our example M=4 and N=12.
# "place=scatter" puts each chunk on a separate node, using exactly
# N CPUs per node; omitting the "-l place" statement instead lets
# PBS fill all available CPUs of the chosen nodes.
#  
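# for instance, a hypothetical smaller request (an illustration only,
# not used in this example) of M=2 nodes with N=4 tasks each would be:
# #PBS -l select=2:ncpus=4:mpiprocs=4 -l place=scatter
#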
# resource limits: max amount of memory (P GB per chunk); memory is
# requested inside the select statement, so merge it with the select
# line above rather than repeating "-l select", e.g.:
# #PBS -l select=4:ncpus=12:mpiprocs=12:mem=PGb
#-------------------------------------------------------------
#
#  resource limits: max. wall clock time (hh:mm:ss) the job may run
#-----------------------------------------------------------------
#PBS -l walltime=3:20:00
#
#  specifying the working directory; our example is $HOME/test
#  (PBS itself sets PBS_O_WORKDIR to the directory the job was
#  submitted from; here it is set explicitly)
#------------------------------------------------------
PBS_O_WORKDIR=$HOME/test
cd $PBS_O_WORKDIR
#
# running the MPI executable with M*N processes; our example has M*N=48
# we have no input file, so the generic (commented) line is replaced
# with the actual command
#------------------------------------------------------
# mpirun -np M*N  ./mpi_program.ex < input.file
mpirun -np 48  ./hello_world.ex 
#
# comment: "np" must equal the number of chunks multiplied by the
# number of "mpiprocs" per chunk (here 4*12=48)
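#
# alternative (a sketch, assuming the standard PBS environment): PBS
# lists the allocated hosts in $PBS_NODEFILE, one line per MPI process,
# so the process count need not be hard-coded:
# mpirun -np $(wc -l < $PBS_NODEFILE) ./hello_world.ex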

----------------------------------------------------------------------

-----------------------------------------------------------------

Batch submission to the queue all_l_p is done with the command:
---------------------------------------------------------------
qsub  mpi_pbs_class_24.sh
--------------------------------------------------------------
you get a response like:
70647.admin

``70647'' is the job number, and you can view the job's progress with
qstat, getting a response like this one (shown here for a script
called pbs_mpi.sh submitted to the queue workq):

Job id            Name             User              Time Use S Queue
----------------  ---------------- ----------------  -------- - -----
25.admin          test             gkoren            00:01:00 E workq           
27.admin          test             gkoren            00:01:00 E workq           
29.admin          pbs_mpi.sh       phr76ja           00:00:00 R workq           
------------------------------------------------------------------
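
Two more useful commands (replace the user name and job number with
your own): "qstat -u user" lists only that user's jobs, and "qdel"
removes a job from the queue:
---------------------------------------------------------------
qstat -u course24
qdel 70647
---------------------------------------------------------------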
When your job 70647 finishes you will get 2 new files:

classtest_24.o70647  classtest_24.e70647
------------------------------------------------------------------
The interesting one is the first, which holds the job's standard
output (the .e file holds the standard error stream):
tamnun [/u/course24/test] 121 > more classtest_24.o70647

Warning: no access to tty (Bad file descriptor).
Thus no job control in this shell.
Hello world! I'm 14 of 48 on n023
Hello world! I'm 24 of 48 on n024
Hello world! I'm 36 of 48 on n025
Hello world! I'm 31 of 48 on n024
Hello world! I'm 13 of 48 on n023
Hello world! I'm 37 of 48 on n025
Hello world! I'm 10 of 48 on n009
Hello world! I'm 19 of 48 on n023
Hello world! I'm 28 of 48 on n024
Hello world! I'm 5 of 48 on n009
Hello world! I'm 35 of 48 on n024
Hello world! I'm 17 of 48 on n023
Hello world! I'm 38 of 48 on n025
Hello world! I'm 20 of 48 on n023
Hello world! I'm 21 of 48 on n023
Hello world! I'm 18 of 48 on n023
Hello world! I'm 23 of 48 on n023
Hello world! I'm 25 of 48 on n024
Hello world! I'm 15 of 48 on n023
Hello world! I'm 4 of 48 on n009
Hello world! I'm 33 of 48 on n024
Hello world! I'm 34 of 48 on n024
Hello world! I'm 16 of 48 on n023
Hello world! I'm 32 of 48 on n024
Hello world! I'm 39 of 48 on n025
Hello world! I'm 40 of 48 on n025
Hello world! I'm 29 of 48 on n024
Hello world! I'm 41 of 48 on n025
Hello world! I'm 12 of 48 on n023
Hello world! I'm 0 of 48 on n009
Hello world! I'm 30 of 48 on n024
Hello world! I'm 22 of 48 on n023
Hello world! I'm 7 of 48 on n009
Hello world! I'm 26 of 48 on n024
Hello world! I'm 6 of 48 on n009
Hello world! I'm 27 of 48 on n024
Hello world! I'm 11 of 48 on n009
Hello world! I'm 42 of 48 on n025
Hello world! I'm 44 of 48 on n025
Hello world! I'm 43 of 48 on n025
Hello world! I'm 1 of 48 on n009
Hello world! I'm 2 of 48 on n009
Hello world! I'm 45 of 48 on n025
Hello world! I'm 8 of 48 on n009
Hello world! I'm 46 of 48 on n025
Hello world! I'm 9 of 48 on n009
Hello world! I'm 3 of 48 on n009
Hello world! I'm 47 of 48 on n025
-------------------------------------------------------------------------
You will also receive the e-mail messages requested by the #PBS -m
flags, telling you that the job has ended.