% A program to calculate pi in parallel
% Modelled after the MPI "pi.f" program: pi is approximated as the
% midpoint-rule quadrature of integral_0^1 4/(1+x^2) dx, using NMAX
% subintervals.
% clear;  % "hosts" is defined at startup in .octaverc - don't clear!
%         % (a sketch of the assumed .octaverc setup is at the bottom
%         % of this file)

PI25DT = 3.141592653589793238462643  % 25-digit reference value of pi
NMAX = 10000000;  % NMAX is the number of terms in the expansion for pi

% First, single-processor code:
t0 = clock();
h = 1/NMAX;
a = 1:NMAX;
b = h*(a-0.5);            % midpoints of the NMAX subintervals of [0,1]
PI = h*sum(4./(1+b.*b))
ERROR = PI25DT - PI
elapsed_time = etime(clock(), t0)

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Now, multiprocessor code:
sockets = connect(hosts);  % open connections to the machines listed in the
                           % variable hosts; hosts=["n50"; "n51"; ... "n55"];

if 1==2,  % verify all connections (disabled; change 1==2 to 1==1 to run it).
          % myid runs from 1-5 on the slaves; myid = 0 is the master node.
  for k=2:6,
    reval("send(myid,sockets(1,:))", sockets(k,:));  % slave k reports its myid
    myid_k = recv(sockets(k,:))
  end
end

t0 = clock();
nproc = 6;
h = 1/NMAX;

% First send over a few variables:
for l=2:6,
  send(nproc, sockets(l,:));
  reval("nproc=recv(sockets(1,:));", sockets(l,:));
  send(NMAX, sockets(l,:));
  reval("NMAX=recv(sockets(1,:));", sockets(l,:));
  send(h, sockets(l,:));
  reval("h=recv(sockets(1,:));", sockets(l,:));
end

% Now send over the command(s) to execute.  Each slave sums every nproc-th
% term, offset by its own myid (a worked example of how these strides tile
% 1..NMAX is at the bottom of this file).  char() pads the commands with
% blanks so rows of unequal length can form the string matrix reval takes;
% the trailing semicolons suppress echoing on the remote nodes.
for l=2:6,
  reval(char("a=(myid+1):nproc:NMAX;", ...
             "b=h*(a-0.5);", ...
             "c=h*sum(4./(1+b.*b));"), sockets(l,:));
end

% Now compute the master node's share (offset 1) while the others grind
% away as well...
h = 1/NMAX;
a = 1:nproc:NMAX;
b = h*(a-0.5);
c = h*sum(4./(1+b.*b));

% Fetch the answers, "c", from the slave nodes and store them in psum.
% psum(1) stays 0: the master's own partial sum is c.
psum = zeros(1,6);
for l=2:6,
  reval("send(c,sockets(1,:))", sockets(l,:));
  psum(l) = recv(sockets(l,:));  % blocking receive; code won't continue
                                 % until something is received
end

PI = c + sum(psum)
ERROR = PI25DT - PI
elapsed_time = etime(clock(), t0)

closeall(sockets);
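
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Appendix: worked example of the strided decomposition used above.  This is
% an illustration only (not part of the run), with small numbers so the
% tiling is easy to see.  With nproc = 6 and NMAX = 12:
%
%   master (myid=0):  a = 1:6:12  ->  [ 1  7]
%   slave  (myid=1):  a = 2:6:12  ->  [ 2  8]
%   slave  (myid=2):  a = 3:6:12  ->  [ 3  9]
%   slave  (myid=3):  a = 4:6:12  ->  [ 4 10]
%   slave  (myid=4):  a = 5:6:12  ->  [ 5 11]
%   slave  (myid=5):  a = 6:6:12  ->  [ 6 12]
%
% Every index 1..NMAX is handled exactly once, so c + sum(psum) reproduces
% the single-processor sum.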
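
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Appendix: a minimal sketch of the .octaverc setup this script assumes.
% The full hostname list is illustrative (the original only shows
% "n50" ... "n55"); adjust it to your cluster.  Each slave node must already
% be running the parallel package's server process so connect(hosts) can
% reach it.
%
%   % in ~/.octaverc on the master node:
%   hosts = ["n50"; "n51"; "n52"; "n53"; "n54"; "n55"];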