%% YAP 7.1.0 -- demo1_mpe.pl
%% demo1_mpe.pl
%%
%% This file was originally created on 3/7/2003
%% by Stasinos Konstantopoulos <konstant@let.rug.nl>
%% as part of the YAP Prolog distribution.
%%
%% This file is in the Public Domain.

:- use_module(library(mpi)).
:- use_module(library(mpe)).


%% demo1_mpe.pl is the same as demo1.pl, except
%% that MPE is used to log MPI activity.

% make the `floor' operator return integer values
:- set_prolog_flag(language, iso).


%%
%% This the calculation that needs to be performed, in this case
%% the sum of [From..To]
%%

%% calc(+From, +To, +Acc, -Res)
%% Res is Acc + From + (From+1) + ... + To, computed by counting
%% To down towards From while accumulating into Acc.
calc(From, From, Acc, Res) :- !,        % base case: one value left; cut commits
        Res is Acc + From.
calc(From, To, Acc, Res) :-
        Acc1 is Acc + To,
        To1 is To - 1,
        calc(From, To1, Acc1, Res).

%%
%% The master node sends the task definition to
%% the workers and then collects the results
%%

%% do(+Rank, +NumProc) -- master clause (Rank = 0).
%% Creates the MPE logging states, broadcasts the task definition
%% msg(From,To) to the workers, then loops collecting one partial
%% result from each of the NumProc-1 workers and prints the sum.
do(0, NumProc):- !,        % commit: the clause below handles workers

        % processing state
        mpe_create_event(Ev1),
        mpe_create_event(Ev2),
        mpe_create_state(Ev1,Ev2,processing,green),

        % bcasting state
        mpe_create_event(Ev3),
        mpe_create_event(Ev4),
        mpe_create_state(Ev3,Ev4,bcasting,red),

        % sending/recving state
        mpe_create_event(Ev5),
        mpe_create_event(Ev6),
        mpe_create_state(Ev5,Ev6,'sending/receiving',brown),

        % pretend that the other nodes do not have
        % access to the task definition.
        % retrieve it and broadcast it.
        get_value(from, From),
        get_value(to, To),
        mpe_log(Ev3,0,event3),
        mpi_bcast(msg(From,To), 0),
        mpe_log(Ev4,0,event4),

        % loop to collect and accumulate partial results
        % from the workers; n counts workers still outstanding
        set_value(n, NumProc),
        set_value(acc, 0),
        repeat,
          mpe_log(Ev5,0,event5),
          mpi_receive(T, Source, Tag),
          mpe_log(Ev6,0,event6),
          format( '0: Proc ~q said: ~q (Tag: ~q)~n', [Source,T,Tag] ),
          % accumulate results
          get_value(acc, Acc),
          NewAcc is Acc + T,
          set_value(acc, NewAcc),
          % processors still left
          get_value(n, Counter),
          NewCounter is Counter - 1,
          set_value(n, NewCounter),
          NewCounter == 1,        % fail back into repeat until all heard from
        !,                        % cut the repeat once the loop is done
        format('0: Sum(~q..~q) = ~q.~n', [From,To,NewAcc]).


%%
%% The workers hear from the master what needs to
%% be done, do the work and then send the results back.
%%

%% do(+Rank, +NumProc) -- worker clause (Rank > 0).
%% Receives the task definition via broadcast, computes this worker's
%% slice [MyFrom..MyTo] of the interval, sums it with calc/4 and sends
%% the partial result back to the master (rank 0) with tag 1.
do(Rank, NumProc):-

        % processing state
        mpe_create_event(Ev1),
        mpe_create_event(Ev2),
        mpe_create_state(Ev1,Ev2,processing,green),

        % bcasting state
        mpe_create_event(Ev3),
        mpe_create_event(Ev4),
        mpe_create_state(Ev3,Ev4,bcasting,red),

        % sending/recving state
        mpe_create_event(Ev5),
        mpe_create_event(Ev6),
        mpe_create_state(Ev5,Ev6,'sending/receiving',brown),

        % catch the task broadcast
        mpe_log(Ev3,0,event3),
        mpi_bcast(Msg, 0),
        mpe_log(Ev4,0,event4),
        Msg = msg(From,To),
        format( '~q: All are calculating ~q..~q.~n', [Rank,From,To] ),
        % split [From..To] evenly over the NumProc-1 workers
        MyFrom is floor(To * (Rank - 1) / (NumProc - 1)) + From,
        MyTo is floor(To * Rank / (NumProc - 1)) + From - 1,
        format( '~q: I am calculating ~q..~q.~n', [Rank,MyFrom,MyTo] ),
        % do the job
        mpe_log(Ev1,0,event1),
        calc( MyFrom, MyTo, 0, Result ),
        mpe_log(Ev2,0,event2),
        format( '~q: sending ~q to 0. (Tag: 1)~n', [Rank,Result] ),
        % send back the results
        mpe_log(Ev5,0,event5),
        mpi_send(Result, 0, 1),
        mpe_log(Ev6,0,event6).


%%
%% This is the entry point
%%

%% start(+From, +To)
%% Entry point: stores the task definition, opens the MPI session,
%% runs the master/worker code for this process's rank, then closes
%% the MPE log and the MPI session.
start(From, To):-
        % store the task definition
        set_value(from, From),
        set_value(to, To),

        mpi_open(Rank, NumProc, ProcName),
        format('Rank: ~q NumProc: ~q, ProcName: ~q~n', [Rank,NumProc,ProcName]),
        do(Rank, NumProc),
        format( 'Rank ~q finished!~n', [Rank] ),
        mpe_close( demo1_mpe ),        % flush the MPE log file
        mpi_close.                     % matches mpi_open/3 above
%% Library predicates used above, with their modes:
%%   format(+T, :L)
%%   use_module(+Files)
%%   set_prolog_flag(+Flag, +Value)
%%   get_value(+A, -V)
%%   set_value(+A, +C)
%%   mpi_send(+Data, +Dest, +Tag)