// Copyright 2024 Moonbeam Foundation
// This file is part of Moonbeam.

// Moonbeam is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Moonbeam is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Moonbeam.  If not, see <http://www.gnu.org/licenses/>.

//! # Lazy Migration Pallet
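//!
//! A pallet hosting one-off migrations that are executed lazily rather than in a single
//! runtime upgrade:
//! - a multi-block state migration driven from `on_idle`, which walks the storage keys and
//!   rewrites each value in place, bounded by the proof size still available in the block;
//! - `clear_suicided_storage`, a call that removes the leftover storage entries of corrupted
//!   (improperly self-destructed) contracts;
//! - `create_contract_metadata`, a call that fills in `AccountCodesMetadata` for contracts
//!   that do not have it yet.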
#![allow(non_camel_case_types)]
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(any(test, feature = "runtime-benchmarks"))]
mod benchmarks;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;

pub mod weights;
pub use weights::WeightInfo;

use frame_support::pallet;

pub use pallet::*;

const MAX_CONTRACT_CODE_SIZE: u64 = 25 * 1024;

#[pallet]
pub mod pallet {
	use super::*;
	use cumulus_primitives_storage_weight_reclaim::get_proof_size;
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;
	use sp_core::H160;

	pub const ARRAY_LIMIT: u32 = 1000;
	pub type GetArrayLimit = ConstU32<ARRAY_LIMIT>;

	/// Pallet for multi-block migrations
	#[pallet::pallet]
	pub struct Pallet<T>(PhantomData<T>);

	#[pallet::storage]
	/// The total number of suicided contracts that were removed
	pub(crate) type SuicidedContractsRemoved<T: Config> = StorageValue<_, u32, ValueQuery>;
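	/// Progress of the lazy state migration, together with the total number of storage keys
	/// migrated so far.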
	#[pallet::storage]
	pub(crate) type StateMigrationStatusValue<T: Config> =
		StorageValue<_, (StateMigrationStatus, u64), ValueQuery>;

	pub(crate) type StorageKey = BoundedVec<u8, ConstU32<1_024>>;
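	/// Status of the lazy state migration:
	/// - `NotStarted`: no key has been processed yet;
	/// - `Started(key)`: `key` is the last storage key processed so far;
	/// - `Error(msg)`: the migration stopped because of an error;
	/// - `Complete`: all keys have been processed.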
	#[derive(Clone, Encode, Decode, scale_info::TypeInfo, PartialEq, Eq, MaxEncodedLen, Debug)]
	pub enum StateMigrationStatus {
		NotStarted,
		Started(StorageKey),
		Error(BoundedVec<u8, ConstU32<1024>>),
		Complete,
	}

	impl Default for StateMigrationStatus {
		fn default() -> Self {
			StateMigrationStatus::NotStarted
		}
	}

	/// Configuration trait of this pallet.
	#[pallet::config]
	pub trait Config: frame_system::Config + pallet_evm::Config + pallet_balances::Config {
		type WeightInfo: WeightInfo;
	}

	#[pallet::error]
	pub enum Error<T> {
		/// The limit cannot be zero
		LimitCannotBeZero,
		/// There must be at least one address
		AddressesLengthCannotBeZero,
		/// The contract is not corrupted (it still exists or was properly suicided)
		ContractNotCorrupted,
		/// The contract already has metadata
		ContractMetadataAlreadySet,
		/// The contract does not exist
		ContractNotExist,
		/// The key length exceeds the maximum allowed
		KeyTooLong,
	}

	pub(crate) const MAX_ITEM_PROOF_SIZE: u64 = 30 * 1024; // 30 KiB
	pub(crate) const PROOF_SIZE_BUFFER: u64 = 100 * 1024; // 100 KiB
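	// As an illustration: with 5 MiB of proof size still available in `on_idle`,
	// `handle_migration` below will process at most
	// (5 * 1024 * 1024 - PROOF_SIZE_BUFFER) / MAX_ITEM_PROOF_SIZE = 167 items in the block.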
	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
		fn on_idle(_n: BlockNumberFor<T>, remaining_weight: Weight) -> Weight {
			let proof_size_before: u64 = get_proof_size().unwrap_or(0);
			let res = Pallet::<T>::handle_migration(remaining_weight);
			let proof_size_after: u64 = get_proof_size().unwrap_or(0);
			let proof_size_diff = proof_size_after.saturating_sub(proof_size_before);
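			// Report the proof size actually consumed (as measured via `get_proof_size`)
			// plus the DB read/write weight accumulated by `handle_migration`.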
			Weight::from_parts(0, proof_size_diff)
				.saturating_add(T::DbWeight::get().reads_writes(res.reads, res.writes))
		}
	}

	#[derive(Default, Clone, PartialEq, Eq, Encode, Decode, Debug)]
	pub(crate) struct ReadWriteOps {
		pub reads: u64,
		pub writes: u64,
	}

	impl ReadWriteOps {
		pub fn new() -> Self {
			Self {
				reads: 0,
				writes: 0,
			}
		}

		pub fn add_one_read(&mut self) {
			self.reads += 1;
		}

		pub fn add_one_write(&mut self) {
			self.writes += 1;
		}

		pub fn add_reads(&mut self, reads: u64) {
			self.reads += reads;
		}

		pub fn add_writes(&mut self, writes: u64) {
			self.writes += writes;
		}
	}
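	/// Outcome of a single `migrate_keys` run: the last key processed (if any), an optional
	/// error, and the number of migrated items plus the read/write operations performed.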
	#[derive(Clone)]
	struct StateMigrationResult {
		last_key: Option<StorageKey>,
		error: Option<&'static str>,
		migrated: u64,
		reads: u64,
		writes: u64,
	}
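	/// Result of looking up the storage key that follows a given key.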
	enum NextKeyResult {
		NextKey(StorageKey),
		NoMoreKeys,
		Error(&'static str),
	}

	impl<T: Config> Pallet<T> {
		/// Handles the migration of storage keys and returns the number of read and write
		/// operations performed.
		pub(crate) fn handle_migration(remaining_weight: Weight) -> ReadWriteOps {
			let mut read_write_ops = ReadWriteOps::new();

			// maximum number of items that can be migrated in one block
			let migration_limit = remaining_weight
				.proof_size()
				.saturating_sub(PROOF_SIZE_BUFFER)
				.saturating_div(MAX_ITEM_PROOF_SIZE);

			if migration_limit == 0 {
				return read_write_ops;
			}

			let (status, mut migrated_keys) = StateMigrationStatusValue::<T>::get();
			read_write_ops.add_one_read();

			let next_key = match &status {
				StateMigrationStatus::NotStarted => Default::default(),
				StateMigrationStatus::Started(storage_key) => {
					let (reads, next_key_result) = Pallet::<T>::get_next_key(storage_key);
					read_write_ops.add_reads(reads);
					match next_key_result {
						NextKeyResult::NextKey(next_key) => next_key,
						NextKeyResult::NoMoreKeys => {
							StateMigrationStatusValue::<T>::put((
								StateMigrationStatus::Complete,
								migrated_keys,
							));
							read_write_ops.add_one_write();
							return read_write_ops;
						}
						NextKeyResult::Error(e) => {
							StateMigrationStatusValue::<T>::put((
								StateMigrationStatus::Error(
									e.as_bytes().to_vec().try_into().unwrap_or_default(),
								),
								migrated_keys,
							));
							read_write_ops.add_one_write();
							return read_write_ops;
						}
					}
				}
				StateMigrationStatus::Complete | StateMigrationStatus::Error(_) => {
					return read_write_ops;
				}
			};

			let res = Pallet::<T>::migrate_keys(next_key, migration_limit);
			migrated_keys += res.migrated;
			read_write_ops.add_reads(res.reads);
			read_write_ops.add_writes(res.writes);

			match (res.last_key, res.error) {
				(None, None) => {
					StateMigrationStatusValue::<T>::put((
						StateMigrationStatus::Complete,
						migrated_keys,
					));
					read_write_ops.add_one_write();
				}
				// maybe we should store the previous key in the storage as well
				(_, Some(e)) => {
					StateMigrationStatusValue::<T>::put((
						StateMigrationStatus::Error(
							e.as_bytes().to_vec().try_into().unwrap_or_default(),
						),
						migrated_keys,
					));
					read_write_ops.add_one_write();
				}
				(Some(key), None) => {
					StateMigrationStatusValue::<T>::put((
						StateMigrationStatus::Started(key),
						migrated_keys,
					));
					read_write_ops.add_one_write();
				}
			}

			read_write_ops
		}

		/// Tries to get the next key in storage; returns `NoMoreKeys` if there are no more
		/// keys to migrate.
		/// Returns an error if the key is too long.
		fn get_next_key(key: &StorageKey) -> (u64, NextKeyResult) {
			if let Some(next) = sp_io::storage::next_key(key) {
				let next: Result<StorageKey, _> = next.try_into();
				match next {
					Ok(next_key) => {
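						// Skip the well-known `:code` key (the full runtime code blob, far
						// larger than `MAX_ITEM_PROOF_SIZE`) and continue with the key that
						// follows it.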
						if next_key.as_slice() == sp_core::storage::well_known_keys::CODE {
							let (reads, next_key_res) = Pallet::<T>::get_next_key(&next_key);
							return (1 + reads, next_key_res);
						}
						(1, NextKeyResult::NextKey(next_key))
					}
					Err(_) => (1, NextKeyResult::Error("Key too long")),
				}
			} else {
				(1, NextKeyResult::NoMoreKeys)
			}
		}

		/// Migrates a maximum of `limit` keys starting from `start` and returns the last key
		/// processed, or `None` if there are no more keys to migrate.
		/// Returns an error if one occurred during the migration.
		fn migrate_keys(start: StorageKey, limit: u64) -> StateMigrationResult {
			let mut key = start;
			let mut migrated = 0;
			let mut next_key_reads = 0;
			let mut writes = 0;
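			// Each value is read and written back unchanged; re-inserting it is what migrates
			// the entry (e.g. re-encoding the backing trie node under a newer state version).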
			while migrated < limit {
				let data = sp_io::storage::get(&key);
				if let Some(data) = data {
					sp_io::storage::set(&key, &data);
					writes += 1;
				}

				migrated += 1;

				if migrated < limit {
					let (reads, next_key_res) = Pallet::<T>::get_next_key(&key);
					next_key_reads += reads;

					match next_key_res {
						NextKeyResult::NextKey(next_key) => {
							key = next_key;
						}
						NextKeyResult::NoMoreKeys => {
							return StateMigrationResult {
								last_key: None,
								error: None,
								migrated,
								reads: migrated + next_key_reads,
								writes,
							};
						}
						NextKeyResult::Error(e) => {
							return StateMigrationResult {
								last_key: Some(key),
								error: Some(e),
								migrated,
								reads: migrated + next_key_reads,
								writes,
							};
						}
					};
				}
			}

			StateMigrationResult {
				last_key: Some(key),
				error: None,
				migrated,
				reads: migrated + next_key_reads,
				writes,
			}
		}
	}
	#[pallet::call]
	impl<T: Config> Pallet<T> {
		// TODO(rodrigo): This extrinsic should be removed once the storage of destroyed
		// contracts has been removed
		#[pallet::call_index(1)]
		#[pallet::weight({
			let addresses_len = addresses.len() as u32;
			<T as crate::Config>::WeightInfo::clear_suicided_storage(addresses_len, *limit)
		})]
		pub fn clear_suicided_storage(
			origin: OriginFor<T>,
			addresses: BoundedVec<H160, GetArrayLimit>,
			limit: u32,
		) -> DispatchResultWithPostInfo {
			ensure_signed(origin)?;

			ensure!(limit != 0, Error::<T>::LimitCannotBeZero);
			ensure!(
				addresses.len() != 0,
				Error::<T>::AddressesLengthCannotBeZero
			);

			let mut limit = limit as usize;

			for address in &addresses {
				// Ensure that the contract is corrupted by checking
				// that it has no code and at least one storage entry.
				let suicided = pallet_evm::Suicided::<T>::contains_key(&address);
				let has_code = pallet_evm::AccountCodes::<T>::contains_key(&address);
				ensure!(
					!suicided
						&& !has_code && pallet_evm::AccountStorages::<T>::iter_key_prefix(&address)
						.next()
						.is_some(),
					Error::<T>::ContractNotCorrupted
				);

				let deleted = pallet_evm::AccountStorages::<T>::drain_prefix(*address)
					.take(limit)
					.count();

				// Check if the storage of this contract has been completely removed
				if pallet_evm::AccountStorages::<T>::iter_key_prefix(&address)
					.next()
					.is_none()
				{
					// All entries got removed, let's count this address as migrated
					SuicidedContractsRemoved::<T>::mutate(|x| *x = x.saturating_add(1));
				}

				limit = limit.saturating_sub(deleted);
				if limit == 0 {
					return Ok(Pays::No.into());
				}
			}
			Ok(Pays::No.into())
		}

		#[pallet::call_index(2)]
		#[pallet::weight(Pallet::<T>::create_contract_metadata_weight(MAX_CONTRACT_CODE_SIZE))]
		pub fn create_contract_metadata(
			origin: OriginFor<T>,
			address: H160,
		) -> DispatchResultWithPostInfo {
			ensure_signed(origin)?;

			ensure!(
				pallet_evm::AccountCodesMetadata::<T>::get(address).is_none(),
				Error::<T>::ContractMetadataAlreadySet
			);

			// Ensure the contract exists
			let code = pallet_evm::AccountCodes::<T>::get(address);
			ensure!(!code.is_empty(), Error::<T>::ContractNotExist);

			// Construct metadata
			let code_size = code.len() as u64;
			let code_hash = sp_core::H256::from(sp_io::hashing::keccak_256(&code));
			let meta = pallet_evm::CodeMetadata {
				size: code_size,
				hash: code_hash,
			};

			// Set metadata
			pallet_evm::AccountCodesMetadata::<T>::insert(address, meta);

			Ok((
				Some(Self::create_contract_metadata_weight(code_size)),
				Pays::No,
			)
				.into())
		}
	}

	impl<T: Config> Pallet<T> {
		fn create_contract_metadata_weight(code_size: u64) -> Weight {
			// max entry size of AccountCodesMetadata (full key + value)
			const PROOF_SIZE_CODE_METADATA: u64 = 100;
			// intermediate nodes might be up to 3 KiB
			const PROOF_SIZE_INTERMEDIATES_NODES: u64 = 3 * 1024;
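			// For the maximum contract size used in the call's declared weight
			// (MAX_CONTRACT_CODE_SIZE = 25 KiB), this amounts to
			// 25 * 1024 + 2 * 3 * 1024 + 100 = 31_844 bytes of proof size.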
			// Account for 2 reads, 1 write
			<T as frame_system::Config>::DbWeight::get()
				.reads_writes(2, 1)
				.set_proof_size(
					code_size + (PROOF_SIZE_INTERMEDIATES_NODES * 2) + PROOF_SIZE_CODE_METADATA,
				)
		}
	}
}