I'm working through the concurrency examples on the Erlang website. I'm currently on section 3.5, which implements a simple chat messenger.
Here is the code:
-module(messenger).
-export([start_server/0, server/1, goOnline/1, goOffline/0, message/2, client/2]).

server_node() ->
    messenger@bill.

server(User_List) ->
    receive
        {From, logon, Name} ->
            New_User = server_goOnline(From, Name, User_List),
            server(New_User);
        {From, logoff} ->
            New_User = server_goOffline(From, User_List),
            server(New_User);
        {From, message_to, To, Message} ->
            server_transfer(From, To, Message, User_List),
            io:format("list is now: ~p~n", [User_List]),
            server(User_List)
    end.

start_server() ->
    register(messenger, spawn(messenger, server, [[]])).

server_goOnline(From, Name, User_List) ->
    case lists:keymember(Name, 2, User_List) of
        true ->
            From ! {messenger, stop, user_exists_at_other_node},
            User_List;
        false ->
            From ! {messenger, logged_on},
            [{From, Name} | User_List]
    end.

server_goOffline(From, User_List) ->
    lists:keydelete(From, 1, User_List).

server_transfer(From, To, Message, User_List) ->
    case lists:keysearch(From, 1, User_List) of
        false ->
            From ! {messenger, stop, you_are_not_logged_on};
        {value, {From, Name}} ->
            server_transfer(From, Name, To, Message, User_List)
    end.

server_transfer(From, Name, To, Message, User_List) ->
    case lists:keysearch(To, 2, User_List) of
        false ->
            From ! {messenger, receiver_not_found};
        {value, {ToPid, To}} ->
            ToPid ! {message_from, Name, Message},
            From ! {messenger, sent}
    end.

goOnline(Name) ->
    case whereis(mess_client) of
        undefined ->
            register(mess_client,
                     spawn(messenger, client, [server_node(), Name]));
        _ -> already_logged_on
    end.

goOffline() ->
    mess_client ! logoff.

message(ToName, Message) ->
    case whereis(mess_client) of
        undefined ->
            not_logged_on;
        _ -> mess_client ! {message_to, ToName, Message},
             ok
    end.

client(Server_Node, Name) ->
    {messenger, Server_Node} ! {self(), logon, Name},
    await_result(),
    client(Server_Node).

client(Server_Node) ->
    receive
        logoff ->
            {messenger, Server_Node} ! {self(), logoff},
            exit(normal);
        {message_to, ToName, Message} ->
            {messenger, Server_Node} ! {self(), message_to, ToName, Message},
            await_result();
        {message_from, FromName, Message} ->
            io:format("Message From ~p: ~p~n", [FromName, Message])
    end,
    client(Server_Node).

await_result() ->
    receive
        {messenger, stop, Why} ->
            io:format("~p~n", [Why]),
            exit(normal);
        {messenger, What} ->
            io:format("~p~n", [What])
    end.
goOnline(Name) ->
case whereis(mess_client) of
undefined ->
register(mess_client,
spawn(messenger, client, [server_node(), Name]));
_ -> already_logged_on
end.
goOffline() ->
mess_client ! logoff.
message(ToName, Message) ->
case whereis(mess_client) of
undefined ->
not_logged_on;
_ -> mess_client ! {message_to, ToName, Message},
ok
end.
client(Server_Node, Name) ->
{messenger, Server_Node} ! {self(), goOnline, Name},
await_result(),
client(Server_Node).
client(Server_Node) ->
receive
logoff ->
{messenger, Server_Node} ! {self(), logoff},
exit(normal);
{message_to, ToName, Message} ->
{messenger, Server_Node} ! {self(), message_to, ToName, Message},
await_result();
{message_from, FromName, Message} ->
io:format("Message From ~p: ~p~n", [FromName, Message])
end,
client(Server_Node).
await_result() ->
receive
{messenger, stop, Why} ->
io:format("~p~n", [Why]),
exit(normal);
{messneger, What} ->
io:format("~p~n", [What])
end.
The tutorial says to change server_node() so that it returns the node where my messenger server runs. How do I configure that function? I don't exactly know what that means. How do I configure server_node() so that it points at the node where my messenger server is registered?
If you start Erlang using the command erl -sname somename, you should have a prompt of the form:
(somename@pcname)1>
Simply replace the code
server_node() ->
    messenger@bill.
with
server_node() ->
    somename@pcname.
and recompile the code.
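For example, if the host is called pcname (an assumption; use whatever appears after the @ in your own prompt) and the server node is started with erl -sname messenger, server_node() should return messenger@pcname. Quote the atom (e.g. 'messenger@my-host') if the host name contains characters such as - that are not legal in an unquoted atom. A minimal session sketch, with node and user names chosen only for illustration:

%% Shell 1: the server node (same machine, so the default cookie is shared)
$ erl -sname messenger
(messenger@pcname)1> c(messenger).
{ok,messenger}
(messenger@pcname)2> messenger:start_server().
true

%% Shell 2: a client node
$ erl -sname peter
(peter@pcname)1> c(messenger).
{ok,messenger}
(peter@pcname)2> messenger:goOnline(peter).
true
logged_on

%% Shell 3: another client node
$ erl -sname james
(james@pcname)1> c(messenger).
{ok,messenger}
(james@pcname)2> messenger:goOnline(james).
true
logged_on
(james@pcname)3> messenger:message(peter, "hello").
ok
sent
%% Shell 2 now prints: Message From james: "hello"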
This Erlang-based web client issues N concurrent requests to a given URL; e.g., escript client.erl http://127.0.0.1:1234/random?num=1000 3200 issues 3200 concurrent requests. For completeness, a corresponding server can be found here.
The client uses spawn_link to spawn N processes that issue one request each. The success or failure of the httpc:request call in a spawned process is handled in that process (within the dispatch_request function) and propagated to the parent process. The parent process handles both data messages from a spawned process and EXIT messages corresponding to the abnormal termination of a spawned process. So the parent process waits to receive N messages from/about the spawned processes before it terminates normally.
Given the above context, the client hangs on some executions (e.g., when the server is terminated forcefully) because the parent process never receives N messages from the child processes. I am observing this behavior on a Raspberry Pi 3B running Raspbian 9.9 and esl-erlang 22.0-1.
The parent process does not seem to be handling all cases of termination of child processes. If so, what are these cases? If not, what might be the reason for fewer than N messages?
Client code:
% escript client.erl http://127.0.0.1:1234/random?num=5 30
-module(client).
-export([main/1]).
-import(httpc, [request/1]).
-mode(compile).

dispatch_request(Url, Parent) ->
    Start = erlang:monotonic_time(microsecond),
    {Status, Value} = httpc:request(get, {Url, []}, [{timeout, 60000}], []),
    Elapsed_Time = (erlang:monotonic_time(microsecond) - Start) / 1000,
    Msg = case Status of
              ok ->
                  io_lib:format("~pms OK", [Elapsed_Time]);
              error ->
                  io_lib:format("~pms REQ_ERR ~p", [Elapsed_Time, element(1, Value)])
          end,
    Parent ! {Status, Msg}.

wait_on_children(0, NumOfSucc, NumOfFail) ->
    io:format("Success: ~p~n", [NumOfSucc]),
    io:format("Failure: ~p~n", [NumOfFail]);
wait_on_children(Num, NumOfSucc, NumOfFail) ->
    receive
        {'EXIT', ChildPid, {ErrorCode, _}} ->
            io:format("Child ~p crashed ~p~n", [ChildPid, ErrorCode]),
            wait_on_children(Num - 1, NumOfSucc, NumOfFail);
        {Verdict, Msg} ->
            io:format("~s~n", [Msg]),
            case Verdict of
                ok -> wait_on_children(Num - 1, NumOfSucc + 1, NumOfFail);
                error -> wait_on_children(Num - 1, NumOfSucc, NumOfFail + 1)
            end
    end.

main(Args) ->
    inets:start(),
    Url = lists:nth(1, Args),
    Num = list_to_integer(lists:nth(2, Args)),
    Parent = self(),
    process_flag(trap_exit, true),
    [spawn_link(fun() -> dispatch_request(Url, Parent) end) ||
        _ <- lists:seq(1, Num)],
    wait_on_children(Num, 0, 0).
I'm trying to talk in a chat room using Erlang. I think everything is almost working, but none of the messages sent to other users ever print to the console. I set up some tests to see whether anything would print; some things do print, but none of the messages that are supposed to be passed between users.
This chat example is taken from page 103 of this Erlang documentation manual. I'm following the instructions they give for peter, james, and fred talking in the chat room on page 104, but I get no console output. I get no indication that the commands worked other than the standard ok or true returned after I execute a function.
One thing that I think might be the cause is that I don't know what they mean when they say Configure the server_node() function. I also just compiled the code by starting erl -sname <name@messenger> and then typing c(messenger).
I want to know how to get the output that should be happening, and how to make sure messages can be passed between each of the members of the chat room.
messenger.erl
-module(messenger).
-export([start_server/0, server/1, logon/1, logoff/0, message/2, client/2]).

%%% Change the function below to return the name of the node where the
%%% messenger server runs
server_node() ->
    messenger@super.

%%% This is the server process for the "messenger"
%%% the user list has the format [{ClientPid1, Name1},{ClientPid22, Name2},...]
server(User_List) ->
    receive
        {From, logon, Name} ->
            New_User_List = server_logon(From, Name, User_List),
            server(New_User_List);
        {From, logoff} ->
            New_User_List = server_logoff(From, User_List),
            server(New_User_List);
        {From, message_to, To, Message} ->
            server_transfer(From, To, Message, User_List),
            io:format("list is now: ~p~n", [User_List]),
            server(User_List)
    end.

%%% Start the server
start_server() ->
    register(messenger, spawn(messenger, server, [[]])).

%%% Server adds a new user to the user list
server_logon(From, Name, User_List) ->
    %% check if logged on anywhere else
    case lists:keymember(Name, 2, User_List) of
        true ->
            From ! {messenger, stop, user_exists_at_other_node}, %reject logon
            User_List;
        false ->
            From ! {messenger, logged_on},
            [{From, Name} | User_List] %add user to the list
    end.

%%% Server deletes a user from the user list
server_logoff(From, User_List) ->
    lists:keydelete(From, 1, User_List).

%%% Server transfers a message between user
server_transfer(From, To, Message, User_List) ->
    %% check that the user is logged on and who he is
    case lists:keysearch(From, 1, User_List) of
        false ->
            From ! {messenger, stop, you_are_not_logged_on};
        {value, {From, Name}} ->
            server_transfer(From, Name, To, Message, User_List)
    end.

%%% If the user exists, send the message
server_transfer(From, Name, To, Message, User_List) ->
    %% Find the receiver and send the message
    case lists:keysearch(To, 2, User_List) of
        false ->
            From ! {messenger, receiver_not_found};
        {value, {ToPid, To}} ->
            ToPid ! {message_from, Name, Message},
            From ! {messenger, sent}
    end.

%%% User Commands
logon(Name) ->
    case whereis(mess_client) of
        undefined ->
            register(mess_client,
                     spawn(messenger, client, [server_node(), Name]));
        _ -> already_logged_on
    end.

logoff() ->
    mess_client ! logoff.

message(ToName, Message) ->
    case whereis(mess_client) of % Test if the client is running
        undefined ->
            not_logged_on;
        _ -> mess_client ! {message_to, ToName, Message},
             ok
    end.

%%% The client process which runs on each server node
client(Server_Node, Name) ->
    {messenger, Server_Node} ! {self(), logon, Name},
    await_result(),
    client(Server_Node).

client(Server_Node) ->
    receive
        logoff ->
            {messenger, Server_Node} ! {self(), logoff},
            exit(normal);
        {message_to, ToName, Message} ->
            {messenger, Server_Node} ! {self(), message_to, ToName, Message},
            await_result();
        {message_from, FromName, Message} ->
            io:format("Message from ~p: ~p~n", [FromName, Message])
    end,
    client(Server_Node).

%%% wait for a response from the server
await_result() ->
    receive
        {messenger, stop, Why} -> % Stop the client
            io:format("~p~n", [Why]),
            exit(normal);
        {messenger, What} -> % Normal response
            io:format("~p~n", [What])
    end.
Is there a way to apply a timeout to a simple module function call?
For example:
my_method(Name) ->
    timer:sleep(2000),
    io:format("hello world ~p!~n", [Name]).
I want to add a timeout option to the above function; is there a way to do it?
You could spawn your function and wait for a message back. You can set a timeout while you wait on the receive.
my_method(Name) ->
    YourTimeOut = 10, % milliseconds; shorter than the 2000 ms sleep, so this example times out
    Self = self(),
    _Pid = spawn(fun() ->
                     timer:sleep(2000),
                     io:format("hello world ~p!~n", [Name]),
                     Self ! {self(), ok}
                 end),
    receive
        {_PidSpawned, ok} -> ok
    after
        YourTimeOut -> timeout
    end.
See the gen:call/3,4 implementation in OTP; the timeout handling is done like this:
do_call(Process, Label, Request, Timeout) ->
    try erlang:monitor(process, Process) of
        Mref ->
            %% If the monitor/2 call failed to set up a connection to a
            %% remote node, we don't want the '!' operator to attempt
            %% to set up the connection again. (If the monitor/2 call
            %% failed due to an expired timeout, '!' too would probably
            %% have to wait for the timeout to expire.) Therefore,
            %% use erlang:send/3 with the 'noconnect' option so that it
            %% will fail immediately if there is no connection to the
            %% remote node.
            catch erlang:send(Process, {Label, {self(), Mref}, Request},
                              [noconnect]),
            receive
                {Mref, Reply} ->
                    erlang:demonitor(Mref, [flush]),
                    {ok, Reply};
                {'DOWN', Mref, _, _, noconnection} ->
                    Node = get_node(Process),
                    exit({nodedown, Node});
                {'DOWN', Mref, _, _, Reason} ->
                    exit(Reason)
            after Timeout -> %% <-- HERE
                    erlang:demonitor(Mref, [flush]),
                    exit(timeout)
            end
    catch
        error:_ ->
            %% Node (C/Java?) is not supporting the monitor.
            %% The other possible case -- this node is not distributed
            %% -- should have been handled earlier.
            %% Do the best possible with monitor_node/2.
            %% This code may hang indefinitely if the Process
            %% does not exist. It is only used for featureweak remote nodes.
            Node = get_node(Process),
            monitor_node(Node, true),
            receive
                {nodedown, Node} ->
                    monitor_node(Node, false),
                    exit({nodedown, Node})
            after 0 ->
                    Tag = make_ref(),
                    Process ! {Label, {self(), Tag}, Request},
                    wait_resp(Node, Tag, Timeout) %% <-- HERE for C/Java nodes
            end
    end.
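In application code you usually reach this mechanism through the Timeout argument of gen_server:call/3 rather than by calling gen:call directly. A small hedged illustration (the server name my_server, the request get_state and the 500 ms value are placeholders, not taken from the answer above):

%% gen_server:call/3 with an explicit 500 ms timeout. If the server does not
%% reply in time, the caller exits with {timeout, {gen_server, call, ...}},
%% which is converted to the atom timeout here.
get_state_or_timeout() ->
    try
        gen_server:call(my_server, get_state, 500)
    catch
        exit:{timeout, _} -> timeout
    end.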
From an old exam, there is a question I don't understand the answer to. The module for the question looks like this:
-module(p4).
-export([start/0, init/0, f1/1, f2/1, f3/2]).

start() ->
    spawn(fun() -> init() end).

init() -> loop(0).

loop(N) ->
    receive
        {f1, Pid} ->
            Pid ! {f1r, self(), N},
            loop(N);
        {f2, Pid} ->
            Pid ! {f2r, self()},
            loop(N+1);
        {f3, Pid, M} ->
            Pid ! {f3r, self()},
            loop(M)
    end.

f1(Serv) ->
    Serv ! {f1, self()},
    receive {f1r, Serv, N} -> N end.

f2(Serv) ->
    Serv ! {f2, self()},
    receive {f2r, Serv} -> ok end.

f3(Serv, N) ->
    Serv ! {f3, self(), N},
    receive {f3r, Serv} -> ok end.
The question asks us to consider the following function as part of the code, and asks what the result of calling it would be. The correct answer is 2. I would have thought it would be 3, since the "increase" call f2(Server) happens after the self() ! {f1r, Server, 2} send.
test3() ->
    Server = start(),
    self() ! {f1r, Server, 2},
    f2(Server),
    f1(Server).
My questions are:
Why is the answer 2 and not 3, and how does self() ! {f1r, Server, 2} work?
Isn't self() ! {f1r, Server, 2} the kind of response that the receive clause in f1(Serv) expects from loop(N)?
self()!{f1r, Server, 2} sends the {f1r, Server, 2} to itself.
This message will wait in the inbox until received.
Then f2 gets executed and then f1.
It is in this last call, when receive {f1r, Serv, N} -> N end is executed, that the process running test3 receives the message already waiting in its inbox (the one it sent to itself) and returns the 2 carried in that message. receive is selective, so f2's receive skips over the older {f1r, ...} message, and f1's receive matches the oldest message in the mailbox that fits its pattern, which is the self-sent one.
Note that at the end of the program, there would have been a {f1r, <process number>, N} waiting in the inbox, N having a value of 1.
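One way to see this in the shell (a sketch; test3/0 is not exported above, so the same steps are typed directly, and the pids shown are only illustrative):

1> Server = p4:start().
<0.85.0>
2> self() ! {f1r, Server, 2}.
{f1r,<0.85.0>,2}
3> p4:f2(Server).
ok
4> p4:f1(Server). % matches the oldest {f1r, Server, _} in the mailbox: the one from step 2
2
5> flush().       % the server's real reply, with N = 1, was left behind
Shell got {f1r,<0.85.0>,1}
ok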
I wish to use OCaml to access the Yahoo Finance API. Essentially, it will be just a bunch of HTTP requests to get quotes from Yahoo Finance.
Which module should I use?
I wish to have async HTTP requests.
There are several possibilities using Lwt:
ocsigen has a quite complete but somewhat complex implementation
cohttp is a bit simpler but lacks some useful parts
Use opam to install them:
$ opam install ocsigenserver cohttp
For instance in a toplevel:
try Topdirs.dir_directory (Sys.getenv "OCAML_TOPLEVEL_PATH") with _ -> ();;
#use "topfind";;
#thread;;
#require "ocsigenserver";;
open Lwt
(* a simple function to access the content of the response *)
let content = function
| { Ocsigen_http_frame.frame_content = Some v } ->
Ocsigen_stream.string_of_stream 100000 (Ocsigen_stream.get v)
| _ -> return ""
(* launch both requests in parallel *)
let t = Lwt_list.map_p Ocsigen_http_client.get_url
[ "http://ocsigen.org/";
"http://stackoverflow.com/" ]
(* maps the result through the content function *)
let t2 = t >>= Lwt_list.map_p content
(* launch the event loop *)
let result = Lwt_main.run t2
and using cohttp:
try Topdirs.dir_directory (Sys.getenv "OCAML_TOPLEVEL_PATH") with _ -> ();;
#use "topfind";;
#require "cohttp.lwt";;
open Lwt
(* a simple function to access the content of the response *)
let content = function
| Some (_, body) -> Cohttp_lwt_unix.Body.string_of_body body
| _ -> return ""
(* launch both requests in parallel *)
let t = Lwt_list.map_p Cohttp_lwt_unix.Client.get
(List.map Uri.of_string
[ "http://example.org/";
"http://example2.org/" ])
(* maps the result through the content function *)
let t2 = t >>= Lwt_list.map_p content
(* launch the event loop *)
let v = Lwt_main.run t2
Note that an implementation of cohttp for Jane Street's Async library is also available.
Just for the record, there is also ocurl with curl multi API support.