diff --git a/src/bin_smart_rollup_node/main_smart_rollup_node.ml b/src/bin_smart_rollup_node/main_smart_rollup_node.ml index ff2fbe9acefa58f0090885506c0467b9c8240b44..00afaaeb25823b672f560811cb636ae1daac310b 100644 --- a/src/bin_smart_rollup_node/main_smart_rollup_node.ml +++ b/src/bin_smart_rollup_node/main_smart_rollup_node.ml @@ -387,6 +387,19 @@ let export_snapshot = let*! () = cctxt#message "Snapshot exported to %s@." snapshot_file in return_unit) +let import_snapshot = + let open Tezos_clic in + command + ~group + ~desc:"Import a snapshot file in a rollup node." + (args1 data_dir_arg) + (prefixes ["snapshot"; "import"] @@ Cli.snapshot_file_param @@ stop) + (fun data_dir snapshot_file cctxt -> + let open Lwt_result_syntax in + let* () = Snapshots.import cctxt ~data_dir ~snapshot_file in + let*! () = cctxt#message "Snapshot successfully imported@." in + return_unit) + let openapi_command = let open Tezos_clic in let open Lwt_result_syntax in @@ -409,6 +422,7 @@ let sc_rollup_commands () = dump_metrics; dump_durable_storage; export_snapshot; + import_snapshot; openapi_command; ] diff --git a/src/lib_smart_rollup_node/cli.ml b/src/lib_smart_rollup_node/cli.ml index cf720ecf1572b56a706fd15ffcfa93a0e3b0c482..b2f80173c443ddf2843fe3af1722912a5961aeca 100644 --- a/src/lib_smart_rollup_node/cli.ml +++ b/src/lib_smart_rollup_node/cli.ml @@ -406,6 +406,13 @@ let snapshot_dir_arg = directory)" string_parameter +let snapshot_file_param next = + Tezos_clic.param + ~name:"" + ~desc:"Snapshot archive file" + string_parameter + next + let string_list = Tezos_clic.parameter (fun (_cctxt : Client_context.full) s -> let list = String.split ',' s in diff --git a/src/lib_smart_rollup_node/node_context.ml b/src/lib_smart_rollup_node/node_context.ml index 4ef9b45f4fc289d20e735566d903e4c55fdbc4e4..5cb6fcfa0f48357a3aebac5a944c83132c52779a 100644 --- a/src/lib_smart_rollup_node/node_context.ml +++ b/src/lib_smart_rollup_node/node_context.ml @@ -140,6 +140,8 @@ let get_fee_parameter node_ctxt operation_kind = Operation_kind.Map.find operation_kind node_ctxt.config.fee_parameters |> Option.value ~default:(Configuration.default_fee_parameter operation_kind) +let global_lockfile_path ~data_dir = Filename.concat data_dir "lock" + let processing_lockfile_path ~data_dir = Filename.concat data_dir "processing_lock" diff --git a/src/lib_smart_rollup_node/node_context.mli b/src/lib_smart_rollup_node/node_context.mli index 45bf40c7c65bafdf77bac601f36f98ee4570ed64..49df1cceab88a7efe2c816fcdd3467ec4e8f828b 100644 --- a/src/lib_smart_rollup_node/node_context.mli +++ b/src/lib_smart_rollup_node/node_context.mli @@ -174,6 +174,9 @@ val check_op_in_whitelist_or_bailout_mode : *) val get_fee_parameter : _ t -> Operation_kind.t -> Injector_common.fee_parameter +(** The path for the lockfile used when starting and running the node. *) +val global_lockfile_path : data_dir:string -> string + (** The path for the lockfile used in block processing. 
*) val processing_lockfile_path : data_dir:string -> string diff --git a/src/lib_smart_rollup_node/node_context_loader.ml b/src/lib_smart_rollup_node/node_context_loader.ml index 692920f88c84707eed075aa72a9138ed59453e50..cb36b7b014355a633b8e467e8976040723f414f7 100644 --- a/src/lib_smart_rollup_node/node_context_loader.ml +++ b/src/lib_smart_rollup_node/node_context_loader.ml @@ -28,7 +28,7 @@ open Node_context let lock ~data_dir = - let lockfile_path = Filename.concat data_dir "lock" in + let lockfile_path = global_lockfile_path ~data_dir in let lock_aux ~data_dir = let open Lwt_result_syntax in let*! () = Event.acquiring_lock () in diff --git a/src/lib_smart_rollup_node/protocol_plugin_sig.ml b/src/lib_smart_rollup_node/protocol_plugin_sig.ml index 77d7cf0b59633b17198c7d28a22617554dacbce1..5591e63f9712c9339586e065553b6c10935ce397 100644 --- a/src/lib_smart_rollup_node/protocol_plugin_sig.ml +++ b/src/lib_smart_rollup_node/protocol_plugin_sig.ml @@ -212,6 +212,13 @@ module type LAYER1_HELPERS = sig (** Retrieve information about the last whitelist update on L1. *) val find_last_whitelist_update : #Client_context.full -> Address.t -> (Z.t * Int32.t) option tzresult Lwt.t + + (** Retrieve a commitment published on L1. *) + val get_commitment : + #Client_context.full -> + Address.t -> + Commitment.Hash.t -> + Commitment.t tzresult Lwt.t end (** Protocol specific functions for processing L1 blocks. *) diff --git a/src/lib_smart_rollup_node/snapshot_utils.ml b/src/lib_smart_rollup_node/snapshot_utils.ml index c6d884ac556748389c76e2ced3c4e90492721840..a155af5ce682090563060125ce89eed6c4dafc88 100644 --- a/src/lib_smart_rollup_node/snapshot_utils.ml +++ b/src/lib_smart_rollup_node/snapshot_utils.ml @@ -239,6 +239,16 @@ let rec create_dir ?(perm = 0o755) dir = let extract (module Reader : READER) (module Writer : WRITER) metadata_check ~snapshot_file ~dest = + let open Lwt_result_syntax in + let module Writer = struct + include Writer + + let count_progress = ref (fun _ -> ()) + + let output oc b p l = + !count_progress 1 ; + output oc b p l + end in let module Archive_reader = Tar.Make (struct include Reader include Writer @@ -249,21 +259,25 @@ let extract (module Reader : READER) (module Writer : WRITER) metadata_check Writer.open_out path in let in_chan = Reader.open_in snapshot_file in - try - let metadata = - read_snapshot_metadata - (module struct - include Reader + let reader_input : (module READER_INPUT) = + (module struct + include Reader - let in_chan = in_chan - end) - in - metadata_check metadata ; - Archive_reader.Archive.extract_gen out_channel_of_header in_chan ; - Reader.close_in in_chan - with e -> - Reader.close_in in_chan ; - raise e + let in_chan = in_chan + end) + in + Lwt.finalize + (fun () -> + let metadata = read_snapshot_metadata reader_input in + let* () = metadata_check metadata in + let spinner = Progress_bar.spinner ~message:"Extracting snapshot" in + Progress_bar.with_reporter spinner @@ fun count_progress -> + Writer.count_progress := count_progress ; + Archive_reader.Archive.extract_gen out_channel_of_header in_chan ; + return metadata) + (fun () -> + Reader.close_in in_chan ; + Lwt.return_unit) let compress ~snapshot_file = let Unix.{st_size = total; _} = Unix.stat snapshot_file in diff --git a/src/lib_smart_rollup_node/snapshot_utils.mli b/src/lib_smart_rollup_node/snapshot_utils.mli index 942e8172471670eca44404350bd75732b63faeb7..9cd9eb2631d8bbec833d5b4d88f7250ad5db6f23 100644 --- a/src/lib_smart_rollup_node/snapshot_utils.mli +++ 
b/src/lib_smart_rollup_node/snapshot_utils.mli @@ -54,14 +54,14 @@ val create : snapshot archive [snapshot_file] in the directory [dest]. Existing files in [dest] with the same names are overwritten. The metadata header read from the snapshot is checked with [check_metadata] before beginning - extraction. *) + extraction, and returned. *) val extract : reader -> writer -> - (snapshot_metadata -> unit) -> + (snapshot_metadata -> unit tzresult Lwt.t) -> snapshot_file:string -> dest:string -> - unit + snapshot_metadata tzresult Lwt.t (** [compress ~snapshot_file] compresses the snapshot archive [snapshot_file] of the form ["path/to/snapshot.uncompressed"] to a new file diff --git a/src/lib_smart_rollup_node/snapshots.ml b/src/lib_smart_rollup_node/snapshots.ml index 9b13c44e7bcba91f0bd6f0286ebeaea8c1c31477..c963f57a4b0011c7025ebf237fc3587f6821468f 100644 --- a/src/lib_smart_rollup_node/snapshots.ml +++ b/src/lib_smart_rollup_node/snapshots.ml @@ -83,13 +83,12 @@ let pre_export_checks_and_get_snapshot_metadata ~data_dir = let*! () = Context.close context in let* () = Store.close store in return - ( { - history_mode; - address = metadata.rollup_address; - head_level = head.header.level; - last_commitment = Sc_rollup_block.most_recent_commitment head.header; - }, - (module Plugin : Protocol_plugin_sig.S) ) + { + history_mode; + address = metadata.rollup_address; + head_level = head.header.level; + last_commitment = Sc_rollup_block.most_recent_commitment head.header; + } let first_available_level ~data_dir store = let open Lwt_result_syntax in @@ -105,10 +104,135 @@ let first_available_level ~data_dir store = let check_some hash what = function | Some x -> Ok x | None -> - error_with "Could not read %s at %a after export." what Block_hash.pp hash + error_with "Could not read %s at %a after import." what Block_hash.pp hash + +let check_block_data_and_get_content (store : _ Store.t) context hash = + let open Lwt_result_syntax in + let* b = Store.L2_blocks.read store.l2_blocks hash in + let*? _b, header = check_some hash "L2 block" b in + let* messages = Store.Messages.read store.messages header.inbox_witness in + let*? _messages, _ = check_some hash "messages" messages in + let* inbox = Store.Inboxes.read store.inboxes header.inbox_hash in + let*? inbox, () = check_some hash "inbox" inbox in + let* commitment = + match header.commitment_hash with + | None -> return_none + | Some commitment_hash -> + let* commitment = + Store.Commitments.read store.commitments commitment_hash + in + let*? commitment, () = check_some hash "commitment" commitment in + return_some commitment + in + (* Ensure head context is available. *) + let*! head_ctxt = Context.checkout context header.context in + let*? head_ctxt = check_some hash "context" head_ctxt in + return (header, inbox, commitment, head_ctxt) + +let check_block_data_consistency (metadata : Metadata.t) (store : _ Store.t) + context hash next_commitment = + let open Lwt_result_syntax in + let* header, inbox, commitment, head_ctxt = + check_block_data_and_get_content store context hash + in + let* (module Plugin) = + Protocol_plugins.proto_plugin_for_level_with_store store header.level + in + let*! pvm_state = Context.PVMState.find head_ctxt in + let*? pvm_state = check_some hash "pvm_state" pvm_state in + let*! 
state_hash = Plugin.Pvm.state_hash metadata.kind pvm_state in + let* () = + match (commitment, header.commitment_hash) with + | None, None -> return_unit + | Some _, None | None, Some _ -> + (* The commitment is fetched from the header value *) + assert false + | Some commitment, Some commitment_hash -> + let hash_of_commitment = Commitment.hash commitment in + let*? () = + error_unless Commitment.Hash.(hash_of_commitment = commitment_hash) + @@ error_of_fmt + "Erroneous commitment hash %a for level %ld instead of %a." + Commitment.Hash.pp + hash_of_commitment + header.level + Commitment.Hash.pp + commitment_hash + in + let*? () = + error_unless State_hash.(state_hash = commitment.compressed_state) + @@ error_of_fmt + "Erroneous state hash %a for level %ld instead of %a." + State_hash.pp + state_hash + header.level + State_hash.pp + commitment.compressed_state + in + let*? () = + error_unless (commitment.inbox_level = header.level) + @@ error_of_fmt + "Erroneous inbox level %ld in commitment instead of level %ld." + commitment.inbox_level + header.level + in + let*? () = + if header.level = metadata.genesis_info.level then Ok () + else + error_unless + Commitment.Hash.( + header.previous_commitment_hash = commitment.predecessor) + @@ error_of_fmt + "Erroneous previous commitment hash %a for level %ld instead \ + of %a." + Commitment.Hash.pp + header.previous_commitment_hash + header.level + Commitment.Hash.pp + commitment.predecessor + in + return_unit + in + let*? () = + match (next_commitment, header.commitment_hash) with + | None, _ | _, None -> + (* If there is no commitment for this block there is no check to do. *) + Ok () + | Some next_commitment, Some commitment_hash -> + error_unless + Commitment.Hash.( + next_commitment.Commitment.predecessor = commitment_hash) + @@ error_of_fmt + "Commitment hash %a for level %ld was expected to be %a in the \ + chain of commitments." + Commitment.Hash.pp + commitment_hash + header.level + Commitment.Hash.pp + next_commitment.predecessor + in + let hash_of_inbox = Inbox.hash inbox in + let*? () = + error_unless Inbox.Hash.(hash_of_inbox = header.inbox_hash) + @@ error_of_fmt + "Erroneous inbox %a for level %ld instead of %a." + Inbox.Hash.pp + hash_of_inbox + header.level + Inbox.Hash.pp + header.inbox_hash + in + return (header, commitment) + +let check_block_data (store : _ Store.t) context hash _next_commitment = + let open Lwt_result_syntax in + let* header, _inbox, commitment, _head_ctxt = + check_block_data_and_get_content store context hash + in + return (header, commitment) let check_l2_chain ~message ~data_dir (store : _ Store.t) context - (head : Sc_rollup_block.t) = + (head : Sc_rollup_block.t) check_block = let open Lwt_result_syntax in let* first_available_level = first_available_level ~data_dir store in let blocks_to_check = @@ -122,39 +246,114 @@ let check_l2_chain ~message ~data_dir (store : _ Store.t) context blocks_to_check in Progress_bar.Lwt.with_reporter progress_bar @@ fun count_progress -> - let rec check_block hash = - let* b = Store.L2_blocks.read store.l2_blocks hash in - let*? _b, header = check_some hash "L2 block" b in - let* messages = Store.Messages.read store.messages header.inbox_witness in - let*? _messages = check_some hash "messages" messages in - let* inbox = Store.Inboxes.read store.inboxes header.inbox_hash in - let*? 
_inbox = check_some hash "inbox" inbox in - let* () = - match header.commitment_hash with - | None -> return_unit - | Some commitment_hash -> - let* commitment = - Store.Commitments.read store.commitments commitment_hash - in - let*? _commitment = check_some hash "commitment" commitment in - return_unit - in - (* Ensure head context is available. *) - let*! head_ctxt = Context.checkout context header.context in - let*? _head_ctxt = check_some hash "context" head_ctxt in + let rec check_chain hash next_commitment = + let* header, commitment = check_block store context hash next_commitment in let*! () = count_progress 1 in - if header.level <= first_available_level then return_unit - else check_block header.predecessor + if header.Sc_rollup_block.level <= first_available_level then return_unit + else + check_chain header.predecessor (Option.either commitment next_commitment) in - check_block head.header.block_hash + check_chain head.header.block_hash None -let post_import_checks ~message ~dest protocol_plugin = +let check_last_commitment head snapshot_metadata = + let last_snapshot_commitment = + Sc_rollup_block.most_recent_commitment head.Sc_rollup_block.header + in + error_unless + Commitment.Hash.( + snapshot_metadata.last_commitment = last_snapshot_commitment) + @@ error_of_fmt + "Last commitment in snapshot is %a but should be %a." + Commitment.Hash.pp + last_snapshot_commitment + Commitment.Hash.pp + snapshot_metadata.last_commitment + +let check_last_commitment_published cctxt snapshot_metadata = + let open Lwt_result_syntax in + Error.trace_lwt_result_with + "Last commitment of snapshot is not published on L1." + @@ let* {current_protocol; _} = + Tezos_shell_services.Shell_services.Blocks.protocols + cctxt + ~block:(`Head 0) + () + in + let*? (module Plugin) = + Protocol_plugins.proto_plugin_for_protocol current_protocol + in + let* (_commitment : Commitment.t) = + Plugin.Layer1_helpers.get_commitment + cctxt + snapshot_metadata.address + snapshot_metadata.last_commitment + in + return_unit + +let check_lcc metadata cctxt (store : _ Store.t) (head : Sc_rollup_block.t) + (module Plugin : Protocol_plugin_sig.S) = + let open Lwt_result_syntax in + let* lcc = + Plugin.Layer1_helpers.get_last_cemented_commitment + cctxt + metadata.Metadata.rollup_address + in + if lcc.level > head.header.level then + (* The snapshot is older than the current LCC *) + return_unit + else + let* lcc_block_hash = + Store.Levels_to_hashes.find store.levels_to_hashes lcc.level + in + let*? lcc_block_hash = + match lcc_block_hash with + | None -> error_with "No block for LCC level %ld" lcc.level + | Some h -> Ok h + in + let* lcc_block_header = + Store.L2_blocks.header store.l2_blocks lcc_block_hash + in + match lcc_block_header with + | None -> + failwith + "Unknown block %a for LCC level %ld" + Block_hash.pp + lcc_block_hash + lcc.level + | Some {commitment_hash = None; _} -> + failwith + "No commitment for block %a for LCC level %ld" + Block_hash.pp + lcc_block_hash + lcc.level + | Some {commitment_hash = Some commitment_hash; _} -> + fail_unless Commitment.Hash.(lcc.commitment = commitment_hash) + @@ error_of_fmt + "Snapshot contains %a for LCC at level %ld but was expected to be \ + %a." 
+ Commitment.Hash.pp + commitment_hash + lcc.level + Commitment.Hash.pp + lcc.commitment + +let post_checks ~action ~message snapshot_metadata ~dest = let open Lwt_result_syntax in let store_dir = Configuration.default_storage_dir dest in let context_dir = Configuration.default_context_dir dest in (* Load context and stores in read-only to run checks. *) let* () = check_store_version store_dir in - let (module Plugin : Protocol_plugin_sig.S) = protocol_plugin in + let* store = + Store.load + Read_only + ~index_buffer_size:1000 + ~l2_blocks_cache_size:100 + store_dir + in + let* head = get_head store in + let* (module Plugin) = + Protocol_plugins.proto_plugin_for_level_with_store store head.header.level + in let* metadata = Metadata.read_metadata_file ~dir:dest in let*? metadata = match metadata with @@ -165,24 +364,45 @@ let post_import_checks ~message ~dest protocol_plugin = let* context = Context.load (module C) ~cache_size:100 Read_only context_dir in - let* store = - Store.load - Read_only - ~index_buffer_size:1000 - ~l2_blocks_cache_size:100 - store_dir - in - let* head = get_head store in let* head = check_head head context in - let* () = check_l2_chain ~message ~data_dir:dest store context head in + let* check_block_data = + match action with + | `Export -> return check_block_data + | `Import cctxt -> ( + let* metadata = Metadata.read_metadata_file ~dir:dest in + match metadata with + | None -> + (* We need the kind of the rollup to run the consistency checks in + order to verify state hashes. *) + failwith "No metadata (needs rollup kind)." + | Some metadata -> + let*? () = check_last_commitment head snapshot_metadata in + let* () = check_lcc metadata cctxt store head (module Plugin) in + return (check_block_data_consistency metadata)) + in + let* () = + check_l2_chain ~message ~data_dir:dest store context head check_block_data + in let*! () = Context.close context in let* () = Store.close store in return_unit -let post_export_checks ~snapshot_file context_plugin = +let post_export_checks ~snapshot_file = + let open Lwt_result_syntax in Lwt_utils_unix.with_tempdir "snapshot_checks_" @@ fun dest -> - extract gzip_reader stdlib_writer (fun _ -> ()) ~snapshot_file ~dest ; - post_import_checks ~message:"Checking snapshot " ~dest context_plugin + let* snapshot_metadata = + extract + gzip_reader + stdlib_writer + (fun _ -> return_unit) + ~snapshot_file + ~dest + in + post_checks + ~action:`Export + ~message:"Checking snapshot " + snapshot_metadata + ~dest let operator_local_file_regexp = Re.Str.regexp "^storage/\\(commitments_published_at_level.*\\|lpc$\\)" @@ -193,16 +413,14 @@ let snapshotable_files_regexp = let export ~data_dir ~dest = let open Lwt_result_syntax in - let* uncompressed_snapshot, protocol_plugin = + let* uncompressed_snapshot = Format.eprintf "Acquiring GC lock@." ; (* Take GC lock first in order to not prevent progression of rollup node. *) Utils.with_lockfile (Node_context.gc_lockfile_path ~data_dir) @@ fun () -> Format.eprintf "Acquiring process lock@." 
; Utils.with_lockfile (Node_context.processing_lockfile_path ~data_dir) @@ fun () -> - let* metadata, protocol_plugin = - pre_export_checks_and_get_snapshot_metadata ~data_dir - in + let* metadata = pre_export_checks_and_get_snapshot_metadata ~data_dir in let dest_file_name = Format.asprintf "snapshot-%a-%ld.%s.uncompressed" @@ -232,8 +450,90 @@ let export ~data_dir ~dest = ~dest:dest_file ; return_unit in - return (dest_file, protocol_plugin) + return dest_file in let snapshot_file = compress ~snapshot_file:uncompressed_snapshot in - let* () = post_export_checks ~snapshot_file protocol_plugin in + let* () = post_export_checks ~snapshot_file in return snapshot_file + +let pre_import_checks cctxt ~data_dir snapshot_metadata = + let open Lwt_result_syntax in + let store_dir = Configuration.default_storage_dir data_dir in + (* Load stores in read-only to make simple checks. *) + let* store = + Store.load + Read_write + ~index_buffer_size:1000 + ~l2_blocks_cache_size:100 + store_dir + in + let* metadata = Metadata.read_metadata_file ~dir:data_dir + and* history_mode = Store.History_mode.read store.history_mode + and* head = Store.L2_head.read store.l2_head in + let* () = Store.close store in + let*? () = + let open Result_syntax in + match (metadata, history_mode) with + | None, _ | _, None -> + (* The rollup node data dir was never initialized, i.e. the rollup node + wasn't run yet. *) + return_unit + | Some {rollup_address; _}, Some history_mode -> + let* () = + error_unless Address.(rollup_address = snapshot_metadata.address) + @@ error_of_fmt + "The existing rollup node is for %a, but the snapshot is for \ + rollup %a." + Address.pp + rollup_address + Address.pp + snapshot_metadata.address + in + let a_history_str = function + | Configuration.Archive -> "an archive" + | Configuration.Full -> "a full" + in + error_unless (history_mode = snapshot_metadata.history_mode) + @@ error_of_fmt + "Cannot import %s snapshot into %s rollup node." + (a_history_str snapshot_metadata.history_mode) + (a_history_str history_mode) + in + let*? () = + let open Result_syntax in + match head with + | None -> + (* The rollup node has no L2 chain. *) + return_unit + | Some head -> + error_when (snapshot_metadata.head_level <= head.header.level) + @@ error_of_fmt + "The rollup node is already at level %ld but the snapshot is only \ + for level %ld." + head.header.level + snapshot_metadata.head_level + in + let* () = check_last_commitment_published cctxt snapshot_metadata in + return_unit + +let import cctxt ~data_dir ~snapshot_file = + let open Lwt_result_syntax in + let*! () = Lwt_utils_unix.create_dir data_dir in + let*! () = Event.acquiring_lock () in + Utils.with_lockfile + ~when_locked:`Fail + (Node_context.global_lockfile_path ~data_dir) + @@ fun () -> + let* snapshot_metadata = + extract + gzip_reader + stdlib_writer + (pre_import_checks cctxt ~data_dir) + ~snapshot_file + ~dest:data_dir + in + post_checks + ~action:(`Import cctxt) + ~message:"Checking imported data" + snapshot_metadata + ~dest:data_dir diff --git a/src/lib_smart_rollup_node/snapshots.mli b/src/lib_smart_rollup_node/snapshots.mli index a365907dbe9a9b420cbf6dc1f3eaa274512f322f..6243b0e75668df9983a7ecbbd5aae6a0f8e04e85 100644 --- a/src/lib_smart_rollup_node/snapshots.mli +++ b/src/lib_smart_rollup_node/snapshots.mli @@ -9,3 +9,11 @@ current directory) containing a snapshot of the data of the rollup node with data directory [data_dir]. The path of the snapshot archive is returned. 
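+
+    A minimal usage sketch (the [let*] binder and [return] come from the
+    caller's [Lwt_result_syntax] context; error handling is left to the
+    caller):
+    {[
+      (* With [dest = None] the archive is written in the current directory,
+         as described above. *)
+      let* snapshot_file = Snapshots.export ~data_dir ~dest:None in
+      return snapshot_file
+    ]}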
 *)
 val export : data_dir:string -> dest:string option -> string tzresult Lwt.t
+
+(** [import cctxt ~data_dir ~snapshot_file] imports the snapshot at path
+    [snapshot_file] into the data directory [data_dir]. *)
+val import :
+  #Client_context.full ->
+  data_dir:string ->
+  snapshot_file:string ->
+  unit tzresult Lwt.t
diff --git a/src/lib_smart_rollup_node/utils.ml b/src/lib_smart_rollup_node/utils.ml
index 7aa495e2d0a80375c9c75f43e99dbcadddd6a997..4eac728e539668a4320d72b017688fa05eee5e2c 100644
--- a/src/lib_smart_rollup_node/utils.ml
+++ b/src/lib_smart_rollup_node/utils.ml
@@ -76,7 +76,7 @@ let dictionary_encoding ~keys ~string_of_key ~key_of_string ~value_encoding =
         | _ -> assert false)
       Data_encoding.Json.encoding
 
-let lock lockfile_path =
+let lock ?(when_locked = `Block) lockfile_path =
   let open Lwt_result_syntax in
   let* lockfile =
     protect @@ fun () ->
@@ -92,7 +92,10 @@ let lock lockfile_path =
           let*! () = Lwt_unix.close lockfile in
           fail err)
       @@ fun () ->
-      let*! () = Lwt_unix.lockf lockfile Unix.F_LOCK 0 in
+      let command =
+        match when_locked with `Block -> Unix.F_LOCK | `Fail -> Unix.F_TLOCK
+      in
+      let*! () = Lwt_unix.lockf lockfile command 0 in
       return_unit
   in
   return lockfile
@@ -102,7 +105,7 @@ let unlock lockfile =
     (fun () -> Lwt_unix.lockf lockfile Unix.F_ULOCK 0)
     (fun () -> Lwt_unix.close lockfile)
 
-let with_lockfile lockfile_path f =
+let with_lockfile ?when_locked lockfile_path f =
   let open Lwt_result_syntax in
-  let* lockfile = lock lockfile_path in
+  let* lockfile = lock ?when_locked lockfile_path in
   Lwt.finalize f (fun () -> unlock lockfile)
diff --git a/src/lib_smart_rollup_node/utils.mli b/src/lib_smart_rollup_node/utils.mli
index bb0522e71305f1910530ce4dc8d65b4bf6cea270..58814d4efd64910ff811e7120c72b197af44ec11 100644
--- a/src/lib_smart_rollup_node/utils.mli
+++ b/src/lib_smart_rollup_node/utils.mli
@@ -43,16 +43,25 @@ val dictionary_encoding :
 
 (** {2 Lock files} *)
 
-(** [lock path] acquires a lock on the file [path] and returns the opened file
-    descriptor (for unlocking). If there is already a lock on [path], this
-    function call is blocking until the previous lock is released. *)
-val lock : string -> Lwt_unix.file_descr tzresult Lwt.t
+(** [lock ?when_locked path] acquires a lock on the file [path] and returns
+    the opened file descriptor (for unlocking). If there is already a lock on
+    [path], the call blocks until the previous lock is released when
+    [when_locked] is [`Block] (the default), and fails immediately when
+    [when_locked = `Fail]. *)
+val lock :
+  ?when_locked:[`Fail | `Block] -> string -> Lwt_unix.file_descr tzresult Lwt.t
 
 (** [unlock fd] releases the lock on the opened file descriptor [fd]. If there
     is no lock or if it is already released, this function does nothing. *)
 val unlock : Lwt_unix.file_descr -> unit Lwt.t
 
-(** [with_lockfile path f] executes the function [f] by taking a lock on the
-    file [path]. If there is already a lock on [path], the execution of [f] is
-    blocking until the previous lock is released. *)
-val with_lockfile : string -> (unit -> 'a tzresult Lwt.t) -> 'a tzresult Lwt.t
+(** [with_lockfile ?when_locked path f] executes the function [f] while holding
+    a lock on the file [path]. If there is already a lock on [path], the call
+    either waits for the previous lock to be released or fails, depending on
+    the [when_locked] parameter; see {!lock} for details.
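+
+    A minimal sketch of a fail-fast critical section (the lockfile path and
+    [do_work] are placeholders):
+    {[
+      with_lockfile ~when_locked:`Fail "/path/to/data_dir/lock" @@ fun () ->
+      (* Returns an error immediately if another process already holds the
+         lock, instead of waiting for it to be released. *)
+      do_work ()
+    ]}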
*) +val with_lockfile : + ?when_locked:[`Fail | `Block] -> + string -> + (unit -> 'a tzresult Lwt.t) -> + 'a tzresult Lwt.t diff --git a/src/proto_017_PtNairob/lib_sc_rollup_node/layer1_helpers.ml b/src/proto_017_PtNairob/lib_sc_rollup_node/layer1_helpers.ml index 061d419a310c9d54850f1e52063e60a97cc02105..fa347dda9529d3f987df12c34ba1d927477dd0b7 100644 --- a/src/proto_017_PtNairob/lib_sc_rollup_node/layer1_helpers.ml +++ b/src/proto_017_PtNairob/lib_sc_rollup_node/layer1_helpers.ml @@ -242,3 +242,14 @@ let find_whitelist _cctxt _rollup_address : return None let find_last_whitelist_update _cctxt _rollup_address = return_none + +let get_commitment cctxt rollup_address commitment_hash = + let open Lwt_result_syntax in + let+ commitment = + Plugin.RPC.Sc_rollup.commitment + (new Protocol_client_context.wrap_full (cctxt :> Client_context.full)) + (cctxt#chain, `Head 0) + (Sc_rollup_proto_types.Address.of_octez rollup_address) + (Sc_rollup_proto_types.Commitment_hash.of_octez commitment_hash) + in + Sc_rollup_proto_types.Commitment.to_octez commitment diff --git a/src/proto_017_PtNairob/lib_sc_rollup_node/layer1_helpers.mli b/src/proto_017_PtNairob/lib_sc_rollup_node/layer1_helpers.mli index 044605211f4948710532621d5256b0dd81b99725..55bdef4ae9ee041c8b21b1480c26e8861645f861 100644 --- a/src/proto_017_PtNairob/lib_sc_rollup_node/layer1_helpers.mli +++ b/src/proto_017_PtNairob/lib_sc_rollup_node/layer1_helpers.mli @@ -84,3 +84,10 @@ val find_whitelist : (** Find and retrieve information about the last whitelist update. *) val find_last_whitelist_update : #Client_context.full -> Address.t -> (Z.t * Int32.t) option tzresult Lwt.t + +(** Retrieve a commitment published on L1. *) +val get_commitment : + #Client_context.full -> + Address.t -> + Commitment.Hash.t -> + Commitment.t tzresult Lwt.t diff --git a/src/proto_018_Proxford/lib_sc_rollup_node/layer1_helpers.ml b/src/proto_018_Proxford/lib_sc_rollup_node/layer1_helpers.ml index eddaea768e5ed3c2a0e7bc8ff6e3389ab91ce29e..95d97652f5aa2a8481109ebd72a3d68654bcf375 100644 --- a/src/proto_018_Proxford/lib_sc_rollup_node/layer1_helpers.ml +++ b/src/proto_018_Proxford/lib_sc_rollup_node/layer1_helpers.ml @@ -260,3 +260,14 @@ let find_last_whitelist_update cctxt rollup_address = (message_index, Protocol.Alpha_context.Raw_level.to_int32 outbox_level)) last_whitelist_update |> return + +let get_commitment cctxt rollup_address commitment_hash = + let open Lwt_result_syntax in + let+ commitment = + Plugin.RPC.Sc_rollup.commitment + (new Protocol_client_context.wrap_full (cctxt :> Client_context.full)) + (cctxt#chain, `Head 0) + rollup_address + commitment_hash + in + Sc_rollup_proto_types.Commitment.to_octez commitment diff --git a/src/proto_018_Proxford/lib_sc_rollup_node/layer1_helpers.mli b/src/proto_018_Proxford/lib_sc_rollup_node/layer1_helpers.mli index f64ebce29da1a6f7bb292abfaaecc7a08695ccc2..a75292119500a3da6f68fbe3b90a7ab579c0ea88 100644 --- a/src/proto_018_Proxford/lib_sc_rollup_node/layer1_helpers.mli +++ b/src/proto_018_Proxford/lib_sc_rollup_node/layer1_helpers.mli @@ -84,3 +84,10 @@ val find_whitelist : (** Find and retrieve information about the last whitelist update. *) val find_last_whitelist_update : #Client_context.full -> Address.t -> (Z.t * Int32.t) option tzresult Lwt.t + +(** Retrieve a commitment published on L1. 
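+
+    During snapshot import this is used to check that the last commitment of
+    the snapshot exists on L1, along the lines of the following sketch
+    (adapted from [Snapshots.check_last_commitment_published]):
+    {[
+      let* (_ : Commitment.t) =
+        Plugin.Layer1_helpers.get_commitment
+          cctxt
+          snapshot_metadata.address
+          snapshot_metadata.last_commitment
+      in
+      return_unit
+    ]}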
*) +val get_commitment : + #Client_context.full -> + Address.t -> + Commitment.Hash.t -> + Commitment.t tzresult Lwt.t diff --git a/src/proto_alpha/lib_sc_rollup_node/layer1_helpers.ml b/src/proto_alpha/lib_sc_rollup_node/layer1_helpers.ml index eddaea768e5ed3c2a0e7bc8ff6e3389ab91ce29e..95d97652f5aa2a8481109ebd72a3d68654bcf375 100644 --- a/src/proto_alpha/lib_sc_rollup_node/layer1_helpers.ml +++ b/src/proto_alpha/lib_sc_rollup_node/layer1_helpers.ml @@ -260,3 +260,14 @@ let find_last_whitelist_update cctxt rollup_address = (message_index, Protocol.Alpha_context.Raw_level.to_int32 outbox_level)) last_whitelist_update |> return + +let get_commitment cctxt rollup_address commitment_hash = + let open Lwt_result_syntax in + let+ commitment = + Plugin.RPC.Sc_rollup.commitment + (new Protocol_client_context.wrap_full (cctxt :> Client_context.full)) + (cctxt#chain, `Head 0) + rollup_address + commitment_hash + in + Sc_rollup_proto_types.Commitment.to_octez commitment diff --git a/src/proto_alpha/lib_sc_rollup_node/layer1_helpers.mli b/src/proto_alpha/lib_sc_rollup_node/layer1_helpers.mli index f64ebce29da1a6f7bb292abfaaecc7a08695ccc2..a75292119500a3da6f68fbe3b90a7ab579c0ea88 100644 --- a/src/proto_alpha/lib_sc_rollup_node/layer1_helpers.mli +++ b/src/proto_alpha/lib_sc_rollup_node/layer1_helpers.mli @@ -84,3 +84,10 @@ val find_whitelist : (** Find and retrieve information about the last whitelist update. *) val find_last_whitelist_update : #Client_context.full -> Address.t -> (Z.t * Int32.t) option tzresult Lwt.t + +(** Retrieve a commitment published on L1. *) +val get_commitment : + #Client_context.full -> + Address.t -> + Commitment.Hash.t -> + Commitment.t tzresult Lwt.t diff --git a/tezt/lib_tezos/sc_rollup_node.ml b/tezt/lib_tezos/sc_rollup_node.ml index c7ed100b10ca50e9df97f8fcaeaaf21ef1dd71b7..78572d664e52fb58f69f1e3031e358664caebd0d 100644 --- a/tezt/lib_tezos/sc_rollup_node.ml +++ b/tezt/lib_tezos/sc_rollup_node.ml @@ -629,6 +629,20 @@ let export_snapshot sc_rollup_node dir = in Runnable.{value = process; run = parse} +let import_snapshot sc_rollup_node ~snapshot_file = + let process = + spawn_command + sc_rollup_node + [ + "snapshot"; + "import"; + snapshot_file; + "--data-dir"; + data_dir sc_rollup_node; + ] + in + Runnable.{value = process; run = Process.check} + let as_rpc_endpoint (t : t) = let state = t.persistent_state in let scheme = "http" in diff --git a/tezt/lib_tezos/sc_rollup_node.mli b/tezt/lib_tezos/sc_rollup_node.mli index cdb5c66a5bec14461da7d370c35411f7323178f0..7958562b2f395749a638cb0bed8c347e16e2ad51 100644 --- a/tezt/lib_tezos/sc_rollup_node.mli +++ b/tezt/lib_tezos/sc_rollup_node.mli @@ -322,6 +322,10 @@ val dump_durable_storage : directory [dir]. *) val export_snapshot : t -> string -> string Runnable.process +(** [import_snapshot rollup_node ~snapshot_file] imports the snapshot + [snapshot_file] in the rollup node [rollup_node]. *) +val import_snapshot : t -> snapshot_file:string -> unit Runnable.process + (** Expose the RPC server address of this node as a foreign endpoint. *) val as_rpc_endpoint : t -> Endpoint.t diff --git a/tezt/tests/sc_rollup.ml b/tezt/tests/sc_rollup.ml index a6d8abff5a8d58d8d62e3fcf13876638c6aca80c..aac5b4f85987cd5d3d36dae14ab1afd43249d11a 100644 --- a/tezt/tests/sc_rollup.ml +++ b/tezt/tests/sc_rollup.ml @@ -1016,8 +1016,14 @@ let test_gc variant ~challenge_window ~commitment_period ~history_mode = | _ -> ()) ; unit -(* Testing that snapshots can be exported correctly for a running node. 
*) -let test_snapshots ~challenge_window ~commitment_period ~history_mode = +(* Testing that snapshots can be exported correctly for a running node, and that + they can be used to bootstrap a blank or existing rollup node. + - we run two rollup nodes but stop the second one at some point + - after a while we create a snapshot from the first rollup node + - we import the snapshot in the second and a fresh rollup node + - we ensure they are all synchronized + - we also try to import invalid snapshots to make sure they are rejected. *) +let test_snapshots ~kind ~challenge_window ~commitment_period ~history_mode = let history_mode_str = Sc_rollup_node.string_of_history_mode history_mode in test_full_scenario { @@ -1026,18 +1032,40 @@ let test_snapshots ~challenge_window ~commitment_period ~history_mode = description = sf "snapshot can be exported and checked (%s)" history_mode_str; } + ~kind ~challenge_window ~commitment_period - @@ fun _protocol sc_rollup_node _rollup_client sc_rollup _node client -> + @@ fun _protocol sc_rollup_node _rollup_client sc_rollup node client -> + (* Originate another rollup for sanity checks *) + let* other_rollup = originate_sc_rollup ~alias:"other_rollup" ~kind client in (* We want to produce snapshots for rollup node which have cemented commitments *) - let level_snapshot = 2 * challenge_window in + let* level = Node.get_level node in + let level_snapshot = level + (2 * challenge_window) in (* We want to build an L2 chain that goes beyond the snapshots (and has additional commitments). *) let total_blocks = level_snapshot + (4 * commitment_period) in + let stop_rollup_node_2_levels = challenge_window + 2 in let* () = Sc_rollup_node.run ~history_mode sc_rollup_node sc_rollup [] in + (* We run the other nodes in mode observer because we only care if they can + catch up. *) + let rollup_node_2 = + Sc_rollup_node.create Observer node ~base_dir:(Client.base_dir client) + in + let rollup_node_3 = + Sc_rollup_node.create Observer node ~base_dir:(Client.base_dir client) + in + let rollup_node_4 = + Sc_rollup_node.create Observer node ~base_dir:(Client.base_dir client) + in + let* () = Sc_rollup_node.run ~history_mode rollup_node_2 sc_rollup [] in + let* () = Sc_rollup_node.run ~history_mode rollup_node_4 other_rollup [] in let rollup_node_processing = - let* () = bake_levels total_blocks client in + let* () = bake_levels stop_rollup_node_2_levels client in + Log.info "Stopping rollup node 2 before snapshot is made." ; + let* () = Sc_rollup_node.terminate rollup_node_2 in + let* () = Sc_rollup_node.terminate rollup_node_4 in + let* () = bake_levels (total_blocks - stop_rollup_node_2_levels) client in let* (_ : int) = Sc_rollup_node.wait_sync sc_rollup_node ~timeout:3. in unit in @@ -1045,11 +1073,61 @@ let test_snapshots ~challenge_window ~commitment_period ~history_mode = Sc_rollup_node.wait_for_level sc_rollup_node level_snapshot in let dir = Tezt.Temp.dir "snapshots" in - let*! snapshot_path = Sc_rollup_node.export_snapshot sc_rollup_node dir in - let* exists = Lwt_unix.file_exists snapshot_path in + let*! snapshot_file = Sc_rollup_node.export_snapshot sc_rollup_node dir in + let* exists = Lwt_unix.file_exists snapshot_file in if not exists then - Test.fail ~__LOC__ "Snapshot file %s does not exist" snapshot_path ; + Test.fail ~__LOC__ "Snapshot file %s does not exist" snapshot_file ; let* () = rollup_node_processing in + Log.info "Try importing snapshot for wrong rollup." ; + let*? 
process_other =
+    Sc_rollup_node.import_snapshot rollup_node_4 ~snapshot_file
+  in
+  let* () =
+    Process.check_error
+      ~msg:(rex "The existing rollup node is for")
+      process_other
+  in
+  Log.info "Importing snapshot in empty rollup node." ;
+  let*! () = Sc_rollup_node.import_snapshot rollup_node_3 ~snapshot_file in
+  (* rollup_node_2 was stopped before so it has data but is late with respect to
+     sc_rollup_node. *)
+  Log.info "Importing snapshot in late rollup node." ;
+  let*! () = Sc_rollup_node.import_snapshot rollup_node_2 ~snapshot_file in
+  Log.info "Running rollup nodes with snapshots until they catch up." ;
+  let* () = Sc_rollup_node.run ~history_mode rollup_node_2 sc_rollup []
+  and* () = Sc_rollup_node.run ~history_mode rollup_node_3 sc_rollup [] in
+  let* _ = Sc_rollup_node.wait_sync ~timeout:60. rollup_node_2
+  and* _ = Sc_rollup_node.wait_sync ~timeout:60. rollup_node_3 in
+  Log.info "Try importing outdated snapshot." ;
+  let* () = Sc_rollup_node.terminate rollup_node_2 in
+  let*? outdated =
+    Sc_rollup_node.import_snapshot rollup_node_2 ~snapshot_file
+  in
+  let* () =
+    Process.check_error
+      ~msg:(rex "The rollup node is already at level")
+      outdated
+  in
+  Log.info "Bake until next commitment." ;
+  let* () =
+    let event_name = "smart_rollup_node_new_commitment.v0" in
+    bake_until_event client ~event_name
+    @@ Sc_rollup_node.wait_for sc_rollup_node event_name (Fun.const (Some ()))
+  in
+  let* _ = Sc_rollup_node.wait_sync ~timeout:30.0 sc_rollup_node in
+  let*! snapshot_file = Sc_rollup_node.export_snapshot sc_rollup_node dir in
+  (* The rollup node should not have published its commitment yet *)
+  Log.info "Try importing snapshot without published commitment." ;
+  let* () = Sc_rollup_node.terminate rollup_node_2 in
+  let*? unpublished =
+    Sc_rollup_node.import_snapshot rollup_node_2 ~snapshot_file
+  in
+  let* () =
+    Process.check_error
+      ~msg:(rex "Last commitment of snapshot is not published on L1.")
+      unpublished
+  in
   unit
 
 (* One can retrieve the list of originated SCORUs.