Danil Eremeev committed 62caad2

Initial release

Files changed (5)

Makefile

+ERLC_FLAGS=
+SOURCES=$(wildcard src/*.erl)
+HEADERS=$(wildcard src/*.hrl)
+OBJECTS=$(SOURCES:src/%.erl=ebin/%.beam)
+all: $(OBJECTS) tests 
+
+tests:
+	erl -pa ebin -eval 'eunit:test("ebin",[verbose])' -s init stop
+
+ebin/%.beam : src/%.erl $(HEADERS)
+	erlc $(ERLC_FLAGS) -o ebin/ $<
+
+clean:
+	-rm $(OBJECTS)
+
+start: $(OBJECTS) tests 
+	./start.sh

src/mth.erl

+-module(mth).
+-include("global.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-export([primes/1,pow_mod/3,mr/1,mr/2,testing_go/1,ttst/2]).
+
+%Compute the parameters D and S for the Miller-Rabin test, so that N-1 = D * 2^S with D odd
+get_ds(D, S) when D rem 2 == 0 ->
+    get_ds(D div 2, S+1);
+get_ds(D, S) ->
+    {D, S}.
+
+get_ds(N) ->
+    get_ds(N-1, 0).
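+
+%A worked example of the decomposition (hand-checked against the code above):
+%  get_ds(13) = get_ds(12, 0) -> get_ds(6, 1) -> get_ds(3, 2) -> {3, 2},
+%  i.e. 13 - 1 = 12 = 3 * 2^2.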
+
+%Two public entry points; mr/1 defaults RepeetCount (the number of
+%Miller-Rabin rounds) to 20. Note that this module treats 1 as prime.
+mr(N, RepeetCount) ->
+	mr(get_ds(N), N, RepeetCount).
+mr(1) -> prime;
+mr(2) -> prime;
+mr(N) when (N rem 2) == 0 -> complex;
+mr(N) ->
+	mr(get_ds(N), N, 20).
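+
+%Example calls (the test is probabilistic, but with 20 rounds these
+%results are what you see in practice):
+%  mr(7) -> prime
+%  mr(9) -> complex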
+
+%Miller-Rabin witness loop ------
+%'first' marks the initial check of the algorithm: X == 1 only counts
+%as a pass before any squaring has been done
+mr(first, DS, N, RepeetCount, X, _SCnt) when X == 1 ->
+	mr(DS, N, RepeetCount-1);
+%X == N-1 counts as a pass on the first check and on every squaring step
+mr(_, DS, N, RepeetCount, X, _SCnt) when X == N - 1 ->
+	mr(DS, N, RepeetCount-1);
+%squaring step: keep going while steps remain; the round counter is
+%only decremented when a witness completes, so RepeetCount passes through
+mr(_, DS, N, RepeetCount, X, SCnt) when SCnt > 0 ->
+	mr(1, DS, N, RepeetCount, X*X rem N, SCnt - 1); %the first argument just must not be 'first'
+%all squaring steps failed: N is composite
+mr(_, _DS, _N, _RepeetCount, _X, _SCnt) ->
+	complex.
+
+%one round per random witness in [2, N-1]
+mr({D, S}, N, RepeetCount) when RepeetCount > 0 ->
+	mr(first, {D,S}, N, RepeetCount, pow_mod(1 + random:uniform(N-2), D, N), S);
+%all rounds passed: N is prime with high probability
+mr(_DS, _N, _RepeetCount) ->
+	prime.
+
+%squaring helper used by pow_mod/3
+pow(N,2)->
+	N*N.
+
+%modular exponentiation by repeated squaring: B^E mod M
+pow_mod(B, E, M) ->
+    case E of
+        0 -> 1;
+        E when E rem 2 == 0 ->
+            pow(pow_mod(B, E div 2, M), 2) rem M;
+        _Else ->
+            (B*pow_mod(B, E-1, M)) rem M
+    end.
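+
+%A small worked example (hand-checked):
+%  pow_mod(5, 3, 7) -> 6, since 5^3 = 125 and 125 rem 7 = 6.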
+
+%benchmark helper: run mr/1 on N, C times
+ttst(0, _N) -> ok;
+ttst(C,N) when C>0 ->
+	mr(N),
+	ttst(C-1,N).
+
+%test mr/1 against prime numbers generated by the
+%dumb algorithm below
+global_prime_test()->
+	testing_go(100000).
+
+testing_go(N)->
+	tst_ch(prime,mr(3), primes(N)).
+
+tst_ch(prime, prime,[])->
+	ok;
+tst_ch(PrResult, MrResult, [H|T])->
+	io:format("~p ~n",[H]),
+	?assertEqual(PrResult,MrResult),
+	?assertEqual(complex,mr(H+1)),
+	tst_ch(prime, mr(H), T).
+
+get_ds_num_test() -> ?assertEqual({1, 1},get_ds(3)).
+get_process_test() -> ?assertEqual({1, 1},get_ds(2,0)).
+get_final_test() -> ?assertEqual({1,1},get_ds(1,1)).
+
+%dumb algorithm for prime numbers (a naive sieve over the odd integers)
+primes(Prime, Max, Primes, Integers) when Prime > Max ->
+    lists:reverse([Prime|Primes]) ++ Integers;
+primes(Prime, Max, Primes, Integers) ->
+    [NewPrime|NewIntegers] = [ X || X <- Integers, X rem Prime =/= 0 ],
+    primes(NewPrime, Max, [Prime|Primes], NewIntegers).
+primes(N) ->
+    primes(3, round(math:sqrt(N)), [], lists:seq(3, N, 2)). % odd numbers only: evens are skipped (2 is never included)
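+
+%Example (hand-checked against the implementation above):
+%  primes(20) -> [3,5,7,11,13,17,19]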
+
+primes_final_test() ->
+	?assertEqual([2, 3, 5, 7], primes(7, 5, [5, 3, 2], [])).
+primes_main_test() ->
+	?assertEqual([2, 3, 5, 7], primes(2, math:sqrt(8), [], [3, 5, 7])).
+primes_full_test() ->
+	?assertEqual([3, 5, 7], primes(8)).

src/primerator.erl

+-module(primerator).
+-include("global.hrl").
+-export([start/0, start/2, stop/0, go/1, create_worker/1,async_receive/1]).
+
+%%The task sent to a worker is {FromNumber, Count}.
+%%This record holds the main information about the global task in the supervisor.
+-record(tasks,{
+				max=2,						%upper bound for the prime search
+				current=2,					%number we are currently working from
+				packet_size=10000,			%how much work to send to a child worker at once
+				returned=[]					%tasks returned by dead workers, or for any other reason
+			}).
+%Process state
+-record(proc_state, {
+						parent_pid=-1,		%-1 means this is the main supervisor; workers keep their parent's pid here
+						childs=dict:new(),	%main supervisor's children: pid -> #child_info{}
+						data=[],			%main supervisor accumulates found primes here
+						condition=resting,	%process condition: resting | working
+						undone_work=#tasks{}%current task information
+					}).
+%What the main supervisor knows about a child
+-record(child_info,{
+					work={0,0},		%current work {StartNumber, NumbersCount} (start number included)
+					condition=resting	%resting | working
+					}).
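+
+%%Message protocol summary, collected here for reference
+%%(see the receive clauses in loop/1 below for the details):
+%%  {go, Num}                 master only: search for primes up to Num
+%%  {add_workers, Pids}       master only: link and register new workers
+%%  {new_work, {From, Cnt}}   worker: test Cnt numbers starting at From
+%%  {done, Pid, Data}         report found primes back up the tree
+%%  {get_result, Pid}         query the collected primes
+%%  {get_percents, Pid}       query the progress percentage
+%%  {stop}                    shut down (the master forwards it to all workers)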
+
+%Start processing; boot the main process first if it is not running
+go(Num) ->
+	case whereis(primerator_pid) of
+		undefined ->
+			start(),
+			go(Num);
+		_ -> 
+			primerator_pid!{go,Num}
+	end,
+	ok.
+
+%Start a slave worker
+%refuse to start without a parent pid
+start(as_slave, undefined) -> false;
+start(as_slave, ParentPid) ->
+	process_flag(trap_exit, true),
+	loop( #proc_state{parent_pid=ParentPid} ).
+
+%Create a new worker on the given node and register it with our server
+create_worker(Node)->
+	case whereis(primerator_pid) of
+		undefined->
+			false;
+		Pid -> 
+			NewPid = spawn(Node, primerator,start, [as_slave, whereis(primerator_pid)]),
+			Pid!{add_workers,[NewPid|[]]},
+			NewPid
+	end.
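+
+%Example, mirroring start.sh (the node name is illustrative):
+%  primerator:start(),
+%  primerator:create_worker('testing1@danil').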
+
+%start master node
+start() ->
+	start({start_params,10000,2}).
+
+%start with the given parameters
+%TODO: known bug: calling start/1 again without stop/0 first re-applies
+%settings such as "current", discarding the previous run's progress
+start({start_params, PacketSize, StartFrom}) ->
+	io:format("Going to start node process... ~n"),
+	Pid = spawn(fun() ->
+			process_flag(trap_exit, true),
+			loop(#proc_state{undone_work=#tasks{packet_size=PacketSize, current=StartFrom}})
+		end),
+	register(primerator_pid, Pid),
+	create_worker(node()),
+	{ok, Pid}.
+
+stop() ->
+	case whereis(primerator_pid) of
+		undefined ->
+			false;
+		_ ->
+			primerator_pid ! {stop},
+			unregister(primerator_pid)		%forget the name right away
+	end.	
+
+%%Sending stop signal to all child workers
+slaves_stop([]) ->
+	ok;
+slaves_stop([Pid|Pids]) ->
+	Pid!{stop},
+	slaves_stop(Pids).
+
+%debug helper: print the data, or send it back if a pid was given
+out_result(Pid,Data) ->
+	case is_pid(Pid) of
+		true -> Pid!{resp,Data};
+		false -> io:format("~p ~n",[Data])
+	end.
+
+loop(State) ->
+	Condition = State#proc_state.condition,
+	ParentPid = State#proc_state.parent_pid,
+	receive
+		%start working
+		{go, Num} when Condition =:= resting, ParentPid =:= -1 -> 
+			io:format("Start working on number ~p ~n",[Num]),
+			UndoneWork = State#proc_state.undone_work,
+			NewState = give_them_work(State#proc_state{data=[], undone_work=UndoneWork#tasks{max=Num,current=2}}),
+			loop(NewState#proc_state{condition=working});
+
+		{get_result, Pid} ->
+			case Condition of 
+				resting -> out_result(Pid, State#proc_state.data);
+				working -> out_result(Pid, [])
+			end,
+			loop(State);
+
+		{get_percents, Pid} ->
+			case Condition of 
+				resting -> out_result(Pid, 100.0);
+				working -> out_result(Pid, get_percents(State))
+			end,
+			loop(State);
+
+		%result data from a child worker
+		{done, Pid, Data} when ParentPid =:= -1 ->
+			io:format("Worker ~p finished its work: ~p ~n",[Pid, Data]),
+			NewState = child_report(Pid, Data, State),
+			loop(NewState);
+		
+		%result from the spawned hard-worker: forward it to the parent
+		{done, _, Data} ->
+			ParentPid!{done,self(),Data},
+			loop(State#proc_state{condition=resting});
+			
+		%register new workers with us (master only)
+		{add_workers, Pids} when ParentPid =:= -1 ->
+			New_childs = add_workers(Pids, State#proc_state.childs),
+			NewState = give_them_work(State#proc_state{childs=New_childs}),
+			loop(NewState);
+
+		%a child receives a packet of work
+		{new_work, {From, Cnt}} when Condition =:= resting ->
+			io:format("Got new work from ~p, ~p count ~n",[From, Cnt]),
+			MyPid = self(),
+			spawn_link(
+				fun () -> 
+					process_work(From, Cnt, MyPid) 
+				end
+			),
+			loop(State#proc_state{condition=working});
+
+		%stopping master
+		{stop} when ParentPid =:= -1 ->
+			slaves_stop(dict:fetch_keys(State#proc_state.childs)),
+			ok;
+
+		%stopping child
+		{stop} ->
+			io:format("child stopped ~n"),
+			ok;
+
+		{'EXIT',Pid, _} when ParentPid =:= -1 ->
+			io:format("Child died ~p ~n",[Pid]),
+			NewState = child_died(Pid, State),
+			loop(NewState);
+
+		{'EXIT',_, Reason} ->
+			case Reason of
+				normal -> loop(State);
+				_ ->
+					%something happened to the child; die as well
+					ok
+			end;
+			
+		Unknown ->
+			io:format("Unrecognized message, I'm dying. Message was: ~p ~n",[Unknown])
+	end.
+
+%%Link to new workers, skipping any we already know about
+add_workers([], Workers) ->	%all workers registered; return the dict
+	Workers;                   %we need it later to hand out work
+add_workers([Node|Nodes], Workers) ->
+	case dict:is_key(Node,Workers) of
+		true ->
+			add_workers(Nodes, Workers);
+		false ->
+			case is_pid(Node) of
+				true ->
+					link(Node),
+					add_workers(Nodes,dict:store(Node,#child_info{},Workers));
+				false ->
+					add_workers(Nodes, Workers)
+			end
+	end.
+
+% Select one free worker and invoke the callback on it.
+% If a worker was selected, return {ok, WorkersDict};
+% otherwise return {false}.
+choose_worker(SetWorkFunc, Workers) ->
+	choose_worker(SetWorkFunc, dict:fetch_keys(Workers), Workers).
+choose_worker(_, [], _) ->
+	{false};
+choose_worker(SetWorkFunc, [Worker| Workers], WorkersDict) ->
+	WorkerInfo = dict:fetch(Worker,WorkersDict),
+	case WorkerInfo#child_info.condition of
+		resting ->
+			New_Worker_Info = SetWorkFunc(Worker, WorkerInfo),
+			NewDict = dict:store(Worker,New_Worker_Info#child_info{condition=working}, WorkersDict),
+			{ok, NewDict};
+		working ->
+			choose_worker(SetWorkFunc, Workers, WorkersDict)
+	end.
+
+%%%%%
+% Assign work from the task record
+% returns { #tasks, WorkersDict }
+%%%%%
+% no work left to hand out
+set_work({false,UndoneWorks}, WorkersDict)->
+	{UndoneWorks,WorkersDict};
+%find a free worker and start the task on it;
+%if none is free, push the work back onto the 'returned' list
+set_work({Work,UndoneWorks=#tasks{returned=Returned}}, WorkersDict)->
+	Ret = choose_worker(
+			fun(Pid, WrkrInfo) -> 
+				io:format("Send work to child: ~p ~n",[Work]),
+				Pid!{new_work, Work},
+				WrkrInfo#child_info{work=Work}
+			end,
+			WorkersDict
+		),
+	case Ret of
+		{ok, Workers} -> set_work(split_work(UndoneWorks),Workers);
+		{false} -> {UndoneWorks#tasks{returned=[Work|Returned]}, WorkersDict}
+	end.
+
+% Split the work into small tasks.
+% Returns {{Start,Count}, #tasks{}} or {false, #tasks{}} if no work is left.
+% Work previously returned by dead workers is handed out first (see the clause order).
+split_work(Tasks=#tasks{max=MaxNumber,current=MaxNumber, returned=[]}) ->
+	{false, Tasks};
+split_work(Tasks=#tasks{max=MaxNumber,current=Current,packet_size=PartSize, returned=[]}) ->
+	case Current+PartSize of
+		Num when Num > MaxNumber ->
+			{{Current,MaxNumber-Current+1}, Tasks#tasks{current=MaxNumber}};
+		Num ->
+			{{Current,PartSize},Tasks#tasks{current=Num}}
+	end;
+split_work(Tasks=#tasks{returned=[Ret|Rets]}) ->
+	{Ret,Tasks#tasks{returned=Rets}}.
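+
+%Worked example with the record defaults (hand-checked):
+%  split_work(#tasks{max=11, current=2, packet_size=10000})
+%    -> {{2,10}, #tasks{current=11, ...}}
+%  i.e. a single packet covering the ten numbers 2..11.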
+
+%%%
+% Return the percentage of the task that is done
+%%%
+get_percents(State) ->
+	get_percents(State#proc_state.undone_work, 0).
+%count the numbers covered by not-yet-finished returned tasks on top of 'current'
+get_percents(Tasks = #tasks{returned=[{_,Cnt}|Works]},Numbers) ->
+	get_percents(Tasks#tasks{returned=Works}, Numbers+Cnt);
+get_percents(#tasks{max=Max,current=Current,returned=[]},Numbers) ->
+	((Current+Numbers)/Max)*100.
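+
+%Example, matching get_percents_test below:
+%  #tasks{max=100, current=25, returned=[{0,25}]} -> ((25+25)/100)*100 = 50.0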
+
+%%%%
+% A child reports finished work: mark it resting, merge its data,
+% and hand out more work if any remains. Returns the updated State.
+%%%%
+child_report(Pid, Data,	State = #proc_state{childs=Childs, data=OldData} ) ->
+	New_childs = dict:store(Pid, #child_info{work={0,0},condition=resting},Childs),
+	give_them_work(State#proc_state{childs=New_childs, data = lists:append(Data, OldData)}).
+
+%%%%
+% A child died: if it was working, return its work to the pool
+% and try to find another worker for it.
+% Returns the updated State.
+%%%%
+child_died(Pid, State=#proc_state{undone_work=#tasks{returned=Returned}, childs=Childs})->
+	#child_info{work=DiedWork,condition=Cond} = dict:fetch(Pid, Childs),
+	case Cond of
+		working ->
+				UndoneWork = State#proc_state.undone_work,
+				New_childs = dict:erase(Pid, Childs),
+				give_them_work(State#proc_state{
+									childs=New_childs,
+									undone_work=UndoneWork#tasks{returned=[DiedWork|Returned]}
+									}
+					);
+		resting -> 
+			State
+	end.
+
+%%%
+%Try to find a worker for the next task, then refresh our own status
+%%%
+give_them_work(State=#proc_state{undone_work=UndoneWork, childs=Childs})->
+	{NewUndoneWork, Wrkrs} = set_work(split_work(UndoneWork), Childs),
+	NewStatus = case i_m_done(NewUndoneWork,dict:fetch_keys(Wrkrs) ,Wrkrs) of 
+				true -> resting;
+				false -> working
+			end,
+	State#proc_state{undone_work=NewUndoneWork, childs = Wrkrs,condition=NewStatus}.
+
+%%%
+% Check whether the main process has finished all work:
+% the task record must be exhausted and every worker resting.
+%%%
+i_m_done(#tasks{max=MaxNumber,current=MaxNumber, returned=[]}, [], _)->
+	true;
+i_m_done(Tasks=#tasks{max=MaxNumber,current=MaxNumber, returned=[]},[Pid | Pids], Wrks)->
+	%check that this worker is not currently working
+	#child_info{condition=Condition} = dict:fetch(Pid, Wrks),
+	case Condition of
+		resting -> i_m_done(Tasks, Pids, Wrks);
+		working -> false
+	end;
+i_m_done(_,_,_)->
+	false.
+
+%%%%
+% Main work function
+%%%%
+process_work(StartFrom, Count, ParentPid) ->
+	Accum = find_primes(StartFrom, Count,[]),
+	ParentPid!{
+				done,
+				self(),
+				Accum
+				}.
+% Find the primes among Cnt numbers starting at Number.
+% mth:mr/1 classifies even numbers as complex itself, so no need to skip them here.
+find_primes(_, 0, Accum) ->	Accum;
+find_primes(Number, Cnt, Accum)->
+	case mth:mr(Number) of  
+		prime ->
+			find_primes(Number+1, Cnt-1, [Number | Accum]);
+		complex ->
+			find_primes(Number+1, Cnt-1, Accum)
+	end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%		TEST CASES
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%helper function; use it from the console, e.g.:
+%  primerator:async_receive(get_percents) or primerator:async_receive(get_result)
+%NOT TESTED, for debugging only
+async_receive(Cmd)->
+	case whereis(primerator_pid) of
+		undefined -> false;
+		Pid -> Pid!{Cmd,self()}
+	end,
+	timer:sleep(10),
+	receive
+		{resp, Data}-> Data
+	end.
+
+test_loop_func(Timeout)->
+    receive
+    after Timeout ->
+        ok
+    end.      
+
+get_percents_test() ->
+	State = #proc_state{undone_work=#tasks{max=11,current=0,returned=[]}},
+	?assertEqual(0.0,get_percents(State)),
+	?assertEqual(50.0,get_percents(State#proc_state{undone_work=#tasks{max=10,current=0,returned=[{0,5}]}})),
+	?assertEqual(50.0,get_percents(State#proc_state{undone_work=#tasks{max=100,current=25,returned=[{0,25}]}})).
+
+find_primes_test()->
+	%the last number of the range is included
+	A = find_primes(1,11,[]),
+	?assertEqual([1,2,3,5,7,11],lists:sort(A)).
+find_primes_ForOneNumber_test()->
+	%a range of a single number
+	A = find_primes(3,1,[]),
+	?assertEqual([3],lists:sort(A)).
+find_primes_EmptyResult_test()->
+	%no primes in the range
+	A = find_primes(4,1,[]),
+	?assertEqual([],lists:sort(A)).
+
+split_work_ChangedInterval_test()->
+	Tasks = #tasks{max=11,current=2},
+	{Work,NewTasks} = split_work(Tasks),
+	?assertEqual({2,11-1},Work),
+	?assertEqual(Tasks#tasks{current=11, max=11},NewTasks),
+	%return this work and get it again
+	NT = NewTasks#tasks{returned=[Work|NewTasks#tasks.returned]},
+	{NewWork,NewTasks2} = split_work(NT),
+	?assertEqual(Work,NewWork),
+	?assertEqual(Tasks#tasks{current=11, max=11},NewTasks2).
+split_work_SmallInterval_test()->
+	Tasks = #tasks{max=11,current=2,packet_size=2},
+	{Work,NewTasks} = split_work(Tasks),
+	?assertEqual({2,2},Work),
+	{Work2,NewTasks2} = split_work(NewTasks),
+	?assertEqual({4,2},Work2),
+	{Work3,NewTasks3} = split_work(NewTasks2),
+	?assertEqual({6,2},Work3),
+	{Work4,NewTasks4} = split_work(NewTasks3),
+	?assertEqual({8,2},Work4),
+	{Work5,NewTasks5} = split_work(NewTasks4),
+	?assertEqual({10,2},Work5),
+	?assertEqual(Tasks#tasks{current=11, max=11},NewTasks5).
+
+child_done_test()->
+	D = dict:store(1,#child_info{work={6,6}},dict:new()),
+	D1 = dict:store(2,#child_info{work={1,5},condition=resting},D),
+	State = #proc_state{childs=D1,data=[1,2,3,5],undone_work=#tasks{max=11,current=11}},
+	#proc_state{data=Primes,childs=Childs} = child_report(1,[7],State),
+	?assertEqual([1,2,3,5,7], lists:sort(Primes)),
+	?assertEqual(#child_info{work={0,0},condition=resting}, dict:fetch(1,Childs)).
+
+choose_worker_test()->
+	D = dict:store(1,#child_info{},dict:new()),
+	D1 = dict:store(2,#child_info{},D),
+	{Ret, A} = choose_worker(fun (_,WrkrInfo) -> WrkrInfo end, D1),
+	?assertEqual(ok,Ret),
+	?assertEqual(#child_info{condition=resting},dict:fetch(1,A)),
+	?assertEqual(#child_info{condition=working},dict:fetch(2,A)).
+choose_worker_AllBusy_test()->
+	D = dict:store(1,#child_info{condition=working},dict:new()),
+	D1 = dict:store(2,#child_info{condition=working},D),
+	?assertEqual({false},choose_worker(fun (_,WrkrInfo) -> WrkrInfo end, D1)).
+choose_worker_OneBusy_test()->
+	D = dict:store(1,#child_info{},dict:new()),
+	D1 = dict:store(2,#child_info{condition=working},D),
+	{Ret, A} = choose_worker(fun (_,WrkrInfo) -> WrkrInfo end, D1),
+	?assertEqual(ok,Ret),
+	?assertEqual(#child_info{condition=working},dict:fetch(1,A)),
+	?assertEqual(#child_info{condition=working},dict:fetch(2,A)).
+
+%duplicate workers are not added, and existing worker data is left unchanged
+add_workers_test()->
+	PidWrk = spawn(fun ()-> test_loop_func(5) end),
+	A=dict:store(PidWrk, #child_info{condition=working},dict:new()),
+	Pids=dict:store(spawn(fun ()-> test_loop_func(5) end),#child_info{condition=resting},A),
+	NewPids = [PidWrk | [spawn(fun ()-> test_loop_func(5) end) | [ spawn(fun ()-> test_loop_func(5) end) | [] ] ] ],
+	Ret = add_workers(NewPids,Pids),
+	?assertEqual(4,length(dict:fetch_keys(Ret))),
+	?assertEqual(#child_info{condition=working},dict:fetch(PidWrk,Ret)).
+
+set_work_AllBusy_test()->
+	Tasks = #tasks{max=11,current=2},
+	PidWrk1 = spawn(fun ()-> test_loop_func(5) end),
+	PidWrk2 = spawn(fun ()-> test_loop_func(5) end),
+	D = dict:store(PidWrk1,#child_info{condition=working},dict:new()),
+	Wrkrs = dict:store(PidWrk2,#child_info{condition=working},D),
+
+	{UndoneWorks, Workers} = set_work(split_work(Tasks), Wrkrs),
+	?assertEqual(2,length(dict:fetch_keys(Workers))),
+	?assertEqual(Tasks#tasks{max=11,current=11,returned=[{2,11-1}]},UndoneWorks).
+set_work_OneFree_test()->
+	Tasks = #tasks{max=11,current=2},
+	PidWrk1 = spawn(fun ()-> test_loop_func(5) end),
+	PidWrk2 = spawn(fun ()-> test_loop_func(5) end),
+	D = dict:store(PidWrk1,#child_info{condition=working},dict:new()),
+	Wrkrs = dict:store(PidWrk2,#child_info{condition=resting},D),
+
+	{UndoneWorks, Workers} = set_work(split_work(Tasks), Wrkrs),
+	?assertEqual(2,length(dict:fetch_keys(Workers))),
+	?assertEqual(Tasks#tasks{max=11,current=11,returned=[]},UndoneWorks).
+
+start_stop_test()->
+	{Ret,Pid}= start(),
+	?assertEqual(true, is_pid(Pid)),
+	?assertEqual(ok,Ret),
+	?assertEqual(Pid,whereis(primerator_pid)),
+	stop(),
+	?assertEqual(undefined,whereis(primerator_pid)).
+
+create_worker_test()->
+	R=create_worker(node()),
+	?assertEqual(false,R),
+
+	{Ret,Pid}= start(),
+	?assertEqual(true, is_pid(Pid)),
+	?assertEqual(ok,Ret),
+	?assertEqual(Pid,whereis(primerator_pid)),
+
+	NewPid=create_worker(node()),
+	?assertEqual(true,is_pid(NewPid)),
+
+	stop(),
+	timer:sleep(10),
+
+	?assertEqual(undefined,whereis(primerator_pid)),
+	?assertEqual(undefined,process_info(NewPid)).
+
+start_result_WithOneWorker_test()->
+	start(),
+	go(11),
+	timer:sleep(50),
+	?assertEqual(100.0,async_receive(get_percents)),
+	?assertEqual([2,3,5,7,11],lists:sort(async_receive(get_result))),
+	stop().
+start_result_WithManyWorkers_test()->
+	start(),
+	%spawn twelve workers on the local node
+	lists:foreach(fun(_) -> create_worker(node()) end, lists:seq(1, 12)),
+	go(11),
+	timer:sleep(50),
+	?assertEqual(100.0,async_receive(get_percents)),
+	?assertEqual([2,3,5,7,11],lists:sort(async_receive(get_result))),
+	stop(),
+	timer:sleep(10).
+start_result_WithKillWorker_test()->
+	start({start_params,2,2}),
+	primerator_pid!{add_workers,[spawn(fun ()->test_loop_func(100) end)|[]]},
+	go(11),
+	timer:sleep(500),
+	?assertEqual(100.0,async_receive(get_percents)),
+	?assertEqual([2,3,5,7,11],lists:sort(async_receive(get_result))),
+	stop().
+start_result_StartWithSeveralTasks_test()->
+	start(),
+	go(11),
+	timer:sleep(50),
+	?assertEqual(100.0,async_receive(get_percents)),
+	?assertEqual([2,3,5,7,11],lists:sort(async_receive(get_result))),
+	go(11),
+	timer:sleep(50),
+	?assertEqual(100.0,async_receive(get_percents)),
+	?assertEqual([2,3,5,7,11],lists:sort(async_receive(get_result))),
+	stop().

start.sh

+#!/bin/bash
+# Two-node demo: node testing1 hosts a worker, node testing2 drives the run.
+# The node names assume a machine called 'danil'; adjust to your hostname.
+erl -pa ebin -sname testing1 -detached
+sleep 5
+erl -pa ebin -sname testing2 -eval "net_adm:ping('testing1@danil'), primerator:start(), primerator:create_worker('testing1@danil'), primerator:go(11)" -s init stop
+erl -pa ebin -remsh testing1@danil -eval "init:stop()"
+