(** [find_best_moves state lastmove depth] builds the alpha-beta game tree
    for [state] to the given [depth] and returns every child node whose
    score is at least as good as the running best — i.e. the best move(s)
    plus any earlier candidates collected while the threshold tightened.
    Even turns minimise the score, odd turns maximise it.
    Raises [Failure] if [scores] runs out before [nodes] (lists are walked
    in lockstep with [List.hd]/[List.tl]). *)
let find_best_moves state lastmove depth =
  (* Lazy game tree rooted at the current position. *)
  let root = Ai_alphabeta.build state lastmove depth in
  let children = match force root with Node (_, _, c) -> c in
  let scores = List.map (score estimate) children in
  (* Even turn = minimising player: lower is better, seed with max_int.
     Non-strict comparisons keep ties alongside the current best. *)
  let better = if (get_turn state) mod 2 = 0 then (<=) else (>=) in
  let init = if (get_turn state) mod 2 = 0 then max_int else min_int in
  (* Accumulate every node at least as good as the best score seen so far.
     (The diff this block came from had dropped the [else]/[in] context
     lines; they are restored here.) *)
  let rec select nodes scores selected score =
    if nodes = [] then selected
    else
      let n, s = (List.hd nodes), (List.hd scores) in
      if better s score then select (List.tl nodes) (List.tl scores) (n :: selected) s
      else select (List.tl nodes) (List.tl scores) selected score
  in
  select children scores [] init

(* NOTE(review): additions-only diff fragment — the [else] of the inner [if]
   and the final expression that consumes [thunk] are not visible here
   (most likely unchanged context lines dropped from the patch). *)
+let find_best state selection scores =

(* Strict comparison: unlike [find_best_moves], ties do NOT replace the
   current pick.  Even turn minimises, odd turn maximises — TODO confirm. *)
 + let better = if (get_turn state) mod 2 = 0 then (<) else (>) in

(* Walk [nodes] and [scores] in lockstep, keeping the single best node. *)
 + let rec select nodes scores selected score =

 + if nodes = [] then selected

(* presumably preceded by [else] in the complete source *)
 + let n, s = (List.hd nodes), (List.hd scores) in

 + if better s score then select (List.tl nodes) (List.tl scores) n s

 + else select (List.tl nodes) (List.tl scores) selected score

(* Seeded with the first candidate, so [selection]/[scores] must be
   non-empty ([List.hd] raises otherwise). *)
 + let thunk = select selection scores (List.hd selection) (List.hd scores) in

(* Total Monte-Carlo playout budget, split evenly between candidates. *)
 + let playouts = 50000 in

(* Mutable class field remembering the last move played.  A [val] is only
   legal inside [object ... end]; the class header and the enclosing
   [method] line are outside this diff fragment. *)
 + val mutable last_move = Init

(* Candidate moves from a depth-4 alpha-beta pre-selection. *)
 + let moves = find_best_moves state Init 4 in

 + let nplayouts = playouts / (List.length moves) in

(* Apply the candidate [Put] on state [s] — assumed to be a scratch copy,
   TODO confirm — then score the position by Monte-Carlo rollouts. *)
 + let montecarlo selection =

 + match force selection with

 + | Node(_, Put i, _) -> put s i; Ai_montecarlo.estimate s 0 nplayouts turns

(* NOTE(review): the match above looks non-exhaustive; a fallback arm is
   probably on a dropped context line. *)
 + let scores = List.map montecarlo moves in

(* Keep the single best candidate, record it, and return its square. *)
 + match find_best state moves scores with

 + | Node(_, Put i, _) -> last_move <- Put i ; i

 + | _ -> failwith "no put found"

(* Same Monte-Carlo selection pipeline as the Put case, specialised to
   [Move]; the enclosing [method] header is not visible in this fragment.
   (The four Put/Move/Fly/Capture copies are candidates for a shared
   helper once the whole class is in view.) *)
 + let moves = find_best_moves state Init 4 in

 + let nplayouts = playouts / (List.length moves) in

(* Play the candidate on state [s] — assumed scratch copy, TODO confirm —
   then estimate the resulting position by rollouts. *)
 + let montecarlo selection =

 + match force selection with

 + | Node(_, Move (f, g), _) -> move s f g; Ai_montecarlo.estimate s 0 nplayouts turns

(* NOTE(review): non-exhaustive match — fallback arm likely on a dropped
   context line. *)
 + let scores = List.map montecarlo moves in

 + match find_best state moves scores with

(* Record the winner and return its source/destination squares. *)
 + | Node(_, Move (f, g), _) -> last_move <- Move (f, g) ; f, g

 + | _ -> failwith "no move found"

(* Monte-Carlo selection pipeline specialised to [Fly]; the enclosing
   [method] header is not visible in this diff fragment. *)
 + let moves = find_best_moves state Init 4 in

 + let nplayouts = playouts / (List.length moves) in

(* Play the candidate flight on state [s] — assumed scratch copy, TODO
   confirm — then estimate the position by rollouts. *)
 + let montecarlo selection =

 + match force selection with

 + | Node(_, Fly (f, g), _) -> fly s f g; Ai_montecarlo.estimate s 0 nplayouts turns

(* NOTE(review): non-exhaustive match — fallback arm likely on a dropped
   context line. *)
 + let scores = List.map montecarlo moves in

 + match find_best state moves scores with

(* Record the winner and return its source/destination squares. *)
 + | Node(_, Fly (f, g), _) -> last_move <- Fly (f, g) ; f, g

 + | _ -> failwith "no fly found"

(* Advance the turn counter on the real state; per the original author's
   note this sequencing is to be unrolled/restructured later. *)
 + end_of_turn state; (* unrolled later *)

(* Monte-Carlo selection pipeline specialised to [Capture]; the enclosing
   [method] header is not visible in this diff fragment. *)
 + let moves = find_best_moves state Init 4 in

 + let nplayouts = playouts / (List.length moves) in

(* Apply the candidate capture on state [s] — assumed scratch copy, TODO
   confirm — then estimate the position by rollouts. *)
 + let montecarlo selection =

 + match force selection with

 + | Node(_, Capture i, _) -> capture s i; Ai_montecarlo.estimate s 0 nplayouts turns

(* NOTE(review): non-exhaustive match — fallback arm likely on a dropped
   context line. *)
 + let scores = List.map montecarlo moves in

 + match find_best state moves scores with

(* Record the winner and return the captured square. *)
 + | Node(_, Capture i, _) -> last_move <- Capture i ; i

 + | _ -> failwith "no capture found"