diff --git a/src/proto_alpha/bin_sc_rollup_node/daemon.ml b/src/proto_alpha/bin_sc_rollup_node/daemon.ml
index fb05321ffc40ae814d7cb34177a87934e9293ced..9173ac8089d9bbcf21a7e9ca19b993be0d7209a3 100644
--- a/src/proto_alpha/bin_sc_rollup_node/daemon.ml
+++ b/src/proto_alpha/bin_sc_rollup_node/daemon.ml
@@ -350,8 +350,6 @@ module Make (PVM : Pvm.S) = struct
     let message = l1_ctxt.cctxt#message in
     let* () = message "Stopping L1 monitor@." in
     l1_ctxt.stopper () ;
-    let* () = message "Closing L1 events stream@." in
-    let* () = Lwt_stream.closed l1_ctxt.events in
     let* () = message "Shutting down L1@." in
     let* () = Layer1.shutdown store in
     let* () = message "Shutting down RPC server@." in
diff --git a/tezt/lib_tezos/sc_rollup_node.ml b/tezt/lib_tezos/sc_rollup_node.ml
index b0d75888222e88d13fd4f1714dc80d626cc2ac2d..3a01a98ecd752f2c964524892bdda92bab1e9539 100644
--- a/tezt/lib_tezos/sc_rollup_node.ml
+++ b/tezt/lib_tezos/sc_rollup_node.ml
@@ -233,6 +233,34 @@ let handle_event sc_node {name; value} =
       update_level sc_node level
   | _ -> ()
 
+let wait_for_full ?where node name filter =
+  let promise, resolver = Lwt.task () in
+  let current_events =
+    String_map.find_opt name node.one_shot_event_handlers
+    |> Option.value ~default:[]
+  in
+  node.one_shot_event_handlers <-
+    String_map.add
+      name
+      (Event_handler {filter; resolver} :: current_events)
+      node.one_shot_event_handlers ;
+  let* result = promise in
+  match result with
+  | None ->
+      raise (Terminated_before_event {daemon = node.name; event = name; where})
+  | Some x -> return x
+
+let event_from_full_event_filter filter json =
+  let raw = get_event_from_full_event json in
+  (* If [json] does not match the correct JSON structure, it
+     will be filtered out, which will result in ignoring
+     the current event.
+     @see raw_event_from_event *)
+  Option.bind raw (fun {value; _} -> filter value)
+
+let wait_for ?where node name filter =
+  wait_for_full ?where node name (event_from_full_event_filter filter)
+
 let create ?(path = Constant.sc_rollup_node) ?name ?color ?data_dir ?event_pipe
     ?(rpc_host = "127.0.0.1") ?rpc_port ?(operators = []) ?default_operator
     mode (node : Node.t) (client : Client.t) =
diff --git a/tezt/lib_tezos/sc_rollup_node.mli b/tezt/lib_tezos/sc_rollup_node.mli
index 3e2992d28064645ed041af174694cf49761507dd..c8c7bd3dbfe98ed44258e39fbfcb18e24c3e6a63 100644
--- a/tezt/lib_tezos/sc_rollup_node.mli
+++ b/tezt/lib_tezos/sc_rollup_node.mli
@@ -146,3 +146,43 @@ val wait_for_ready : t -> unit Lwt.t
 
     If [timeout] is provided, stop waiting if [timeout] seconds have passed. *)
 val wait_for_level : ?timeout:float -> t -> int -> int Lwt.t
+
+(** Wait for a custom event to occur.
+
+    Usage: [wait_for_full daemon name filter]
+
+    If an event named [name] occurs, apply [filter] to its
+    whole JSON, which is of the form:
+    {[{
+      "fd-sink-item.v0": {
+        "hostname": "...",
+        "time_stamp": ...,
+        "section": [ ... ],
+        "event": { <name>: ... }
+      }
+    }]}
+    If [filter] returns [None], continue waiting.
+    If [filter] returns [Some x], return [x].
+
+    [where] is used as the [where] field of the [Terminated_before_event] exception
+    if the daemon terminates. It should describe the constraint that [filter] applies,
+    such as ["field level exists"].
+
+    It is advised to register such event handlers before starting the daemon,
+    because events that occur before the handler is registered will not
+    trigger it. For instance, you can define a promise with
+    [let x_event = wait_for daemon "x" (fun x -> Some x)]
+    and bind it later with [let* x = x_event]. *)
+val wait_for_full :
+  ?where:string -> t -> string -> (JSON.t -> 'a option) -> 'a Lwt.t
+
+(** Same as [wait_for_full] but ignore metadata from the file descriptor sink.
+
+    More precisely, [filter] is applied to the value of field
+    ["fd-sink-item.v0"."event".<name>].
+
+    If the daemon receives a JSON value that does not match the right
+    JSON structure, it is not given to [filter] and the event is
+    ignored. See [wait_for_full] to know what the JSON value must
+    look like. *)
+val wait_for : ?where:string -> t -> string -> (JSON.t -> 'a option) -> 'a Lwt.t
diff --git a/tezt/tests/sc_rollup.ml b/tezt/tests/sc_rollup.ml
index c04abf4f28431f17bdd31ae1b85f4c5b5ada6dd1..cd35846db2cdcce7fb343402e54d017df1d9e763 100644
--- a/tezt/tests/sc_rollup.ml
+++ b/tezt/tests/sc_rollup.ml
@@ -184,6 +184,13 @@ let originate_sc_rollup ?(hooks = hooks) ?(burn_cap = Tez.(of_int 9999999))
   let* () = Client.bake_for_and_wait client in
   return sc_rollup
 
+(** Wait for the rollup node to detect a conflict. *)
+let wait_for_conflict_detected sc_node =
+  Sc_rollup_node.wait_for
+    sc_node
+    "sc_rollup_node_conflict_detected.v0"
+    (fun _ -> Some ())
+
 (* Configuration of a rollup node
    ------------------------------
 
@@ -2320,6 +2327,13 @@ let test_refutation_scenario ?commitment_period ?challenge_window variant ~kind
   let bootstrap1_key = Constant.bootstrap1.public_key_hash in
   let bootstrap2_key = Constant.bootstrap2.public_key_hash in
 
+  let conflict_detected = ref false in
+  let _ =
+    let* () = wait_for_conflict_detected sc_rollup_node in
+    conflict_detected := true ;
+    unit
+  in
+
   let sc_rollup_node2 =
     Sc_rollup_node.create Operator node client ~default_operator:bootstrap2_key
   in
@@ -2362,6 +2376,9 @@ let test_refutation_scenario ?commitment_period ?challenge_window variant ~kind
   in
   let* () = bake_levels ~hook (final_level - List.length inputs) client in
 
+  if not !conflict_detected then
+    Test.fail "Honest node did not detect the conflict" ;
+
   let*! honest_deposit =
     RPC.Contracts.get_frozen_bonds ~contract_id:bootstrap1_key client
   in
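
Note: the following is an illustrative sketch and is not part of the patch above. It shows how a tezt test might use the new [Sc_rollup_node.wait_for] with a filter that extracts a value, instead of the unit filter used in [wait_for_conflict_detected]. The event name "sc_rollup_node_layer_1_new_head_processed.v0", its "level" field and the helper names are assumptions made for illustration only; adjust them to the event the rollup node actually emits.

(* Register the handler before triggering the event; bind the promise later,
   as recommended in the [wait_for_full] documentation. *)
let wait_for_processed_level sc_rollup_node =
  Sc_rollup_node.wait_for
    sc_rollup_node
    "sc_rollup_node_layer_1_new_head_processed.v0"
    ~where:"field level exists"
    (* Return [None] if the "level" field is absent, so the event is ignored
       and the wait continues. *)
    (fun json -> JSON.(json |-> "level" |> as_int_opt))

let check_level_processed sc_rollup_node client =
  (* The promise is created (and the handler registered) before baking. *)
  let level_event = wait_for_processed_level sc_rollup_node in
  let* () = Client.bake_for_and_wait client in
  let* level = level_event in
  Log.info "Rollup node processed level %d." level ;
  unit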