From 447e0a34373b97139b62f5df9f6f2780f87f25ed Mon Sep 17 00:00:00 2001 From: Keith Date: Fri, 26 Apr 2024 22:55:51 +0800 Subject: [PATCH 01/10] Ban direct indexing and enable CI to detect it --- .github/workflows/check-rust.yml | 4 +- pallets/commitments/src/types.rs | 4 +- pallets/subtensor/src/epoch.rs | 148 ++++---- pallets/subtensor/src/math.rs | 524 +++++++++++--------------- pallets/subtensor/src/registration.rs | 144 ++----- pallets/subtensor/src/root.rs | 33 +- pallets/subtensor/src/serving.rs | 2 +- pallets/subtensor/src/utils.rs | 101 ++--- pallets/subtensor/src/weights.rs | 5 +- 9 files changed, 370 insertions(+), 595 deletions(-) diff --git a/.github/workflows/check-rust.yml b/.github/workflows/check-rust.yml index 599f14589..8a59ec1ba 100644 --- a/.github/workflows/check-rust.yml +++ b/.github/workflows/check-rust.yml @@ -269,8 +269,8 @@ jobs: run: | cargo clippy -- -D clippy::panic \ -D clippy::todo \ - -D clippy::unimplemented -# -D clippy::indexing_slicing \ + -D clippy::unimplemented \ + -D clippy::indexing_slicing # -D clippy::unwrap_used \ # ensures cargo fix has no trivial changes that can be applied diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index 9de95ec13..3502528cc 100644 --- a/pallets/commitments/src/types.rs +++ b/pallets/commitments/src/types.rs @@ -87,8 +87,8 @@ impl Encode for Data { Data::None => vec![0u8; 1], Data::Raw(ref x) => { let l = x.len().min(128); - let mut r = vec![l as u8 + 1; l + 1]; - r[1..].copy_from_slice(&x[..l as usize]); + let mut r = vec![l as u8 + 1]; + r.extend_from_slice(&x[..]); r } Data::BlakeTwo256(ref h) => once(130).chain(h.iter().cloned()).collect(), diff --git a/pallets/subtensor/src/epoch.rs b/pallets/subtensor/src/epoch.rs index 4a485bf4b..3ba07369e 100644 --- a/pallets/subtensor/src/epoch.rs +++ b/pallets/subtensor/src/epoch.rs @@ -7,6 +7,7 @@ use substrate_fixed::types::{I32F32, I64F64, I96F32}; impl Pallet { // Calculates reward consensus and returns the emissions for uids/hotkeys in a given `netuid`. // (Dense version used only for testing purposes.) + #[allow(clippy::indexing_slicing)] pub fn epoch_dense(netuid: u16, rao_emission: u64) -> Vec<(T::AccountId, u64, u64)> { // Get subnetwork size. let n: u16 = Self::get_subnetwork_n(netuid); @@ -58,17 +59,13 @@ impl Pallet { // == Stake == // =========== - let mut hotkeys: Vec<(u16, T::AccountId)> = vec![]; - for (uid_i, hotkey) in - as IterableStorageDoubleMap>::iter_prefix(netuid) - { - hotkeys.push((uid_i, hotkey)); - } + let hotkeys: Vec<(u16, T::AccountId)> = + as IterableStorageDoubleMap>::iter_prefix(netuid).collect(); log::trace!("hotkeys: {:?}", &hotkeys); // Access network stake as normalized vector. let mut stake_64: Vec = vec![I64F64::from_num(0.0); n as usize]; - for (uid_i, hotkey) in hotkeys.iter() { + for (uid_i, hotkey) in &hotkeys { stake_64[*uid_i as usize] = I64F64::from_num(Self::get_total_stake_for_hotkey(hotkey)); } inplace_normalize_64(&mut stake_64); @@ -309,29 +306,24 @@ impl Pallet { // Column max-upscale EMA bonds for storage: max_i w_ij = 1. inplace_col_max_upscale(&mut ema_bonds); - for i in 0..n { + new_validator_permits.iter().zip(validator_permits).zip(ema_bonds).enumerate().for_each(|(i, ((new_permit, validator_permit), ema_bond))| { // Set bonds only if uid retains validator permit, otherwise clear bonds. 
- if new_validator_permits[i as usize] { + if *new_permit { let new_bonds_row: Vec<(u16, u16)> = (0..n) - .zip(vec_fixed_proportions_to_u16(ema_bonds[i as usize].clone())) + .zip(vec_fixed_proportions_to_u16(ema_bond.clone())) .collect(); - Bonds::::insert(netuid, i, new_bonds_row); - } else if validator_permits[i as usize] { + Bonds::::insert(netuid, i as u16, new_bonds_row); + } else if validator_permit { // Only overwrite the intersection. let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; - Bonds::::insert(netuid, i, new_empty_bonds_row); + Bonds::::insert(netuid, i as u16, new_empty_bonds_row); } - } + }); - let mut result: Vec<(T::AccountId, u64, u64)> = vec![]; - for (uid_i, hotkey) in hotkeys.iter() { - result.push(( - hotkey.clone(), - server_emission[*uid_i as usize], - validator_emission[*uid_i as usize], - )); - } - result + hotkeys.into_iter().map(|(uid_i, hotkey)| { + (hotkey, server_emission[uid_i as usize], validator_emission[uid_i as usize]) + }) + .collect() } // Calculates reward consensus values, then updates rank, trust, consensus, incentive, dividend, pruning_score, emission and bonds, and @@ -347,6 +339,7 @@ impl Pallet { // * 'debug' ( bool ): // - Print debugging outputs. // + #[allow(clippy::indexing_slicing)] pub fn epoch(netuid: u16, rao_emission: u64) -> Vec<(T::AccountId, u64, u64)> { // Get subnetwork size. let n: u16 = Self::get_subnetwork_n(netuid); @@ -386,17 +379,13 @@ impl Pallet { // == Stake == // =========== - let mut hotkeys: Vec<(u16, T::AccountId)> = vec![]; - for (uid_i, hotkey) in - as IterableStorageDoubleMap>::iter_prefix(netuid) - { - hotkeys.push((uid_i, hotkey)); - } + let hotkeys: Vec<(u16, T::AccountId)> = + as IterableStorageDoubleMap>::iter_prefix(netuid).collect(); log::trace!("hotkeys: {:?}", &hotkeys); // Access network stake as normalized vector. let mut stake_64: Vec = vec![I64F64::from_num(0.0); n as usize]; - for (uid_i, hotkey) in hotkeys.iter() { + for (uid_i, hotkey) in &hotkeys { stake_64[*uid_i as usize] = I64F64::from_num(Self::get_total_stake_for_hotkey(hotkey)); } inplace_normalize_64(&mut stake_64); @@ -665,31 +654,31 @@ impl Pallet { // Column max-upscale EMA bonds for storage: max_i w_ij = 1. inplace_col_max_upscale_sparse(&mut ema_bonds, n); - for i in 0..n { - // Set bonds only if uid retains validator permit, otherwise clear bonds. - if new_validator_permits[i as usize] { - let new_bonds_row: Vec<(u16, u16)> = ema_bonds[i as usize] - .iter() - .map(|(j, value)| (*j, fixed_proportion_to_u16(*value))) - .collect(); - Bonds::::insert(netuid, i, new_bonds_row); - } else if validator_permits[i as usize] { - // Only overwrite the intersection. - let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; - Bonds::::insert(netuid, i, new_empty_bonds_row); - } - } + new_validator_permits + .iter() + .zip(validator_permits) + .zip(ema_bonds) + .enumerate() + .for_each(|(i, ((new_permit, validator_permit), ema_bond))| { + // Set bonds only if uid retains validator permit, otherwise clear bonds. + if *new_permit { + let new_bonds_row: Vec<(u16, u16)> = ema_bond + .iter() + .map(|(j, value)| (*j, fixed_proportion_to_u16(*value))) + .collect(); + Bonds::::insert(netuid, i as u16, new_bonds_row); + } else if validator_permit { + // Only overwrite the intersection. 
+ let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; + Bonds::::insert(netuid, i as u16, new_empty_bonds_row); + } + }); // Emission tuples ( hotkeys, server_emission, validator_emission ) - let mut result: Vec<(T::AccountId, u64, u64)> = vec![]; - for (uid_i, hotkey) in hotkeys.iter() { - result.push(( - hotkey.clone(), - server_emission[*uid_i as usize], - validator_emission[*uid_i as usize], - )); - } - result + hotkeys.into_iter().map(|(uid_i, hotkey)| { + (hotkey, server_emission[uid_i as usize], validator_emission[uid_i as usize]) + }) + .collect() } pub fn get_float_rho(netuid: u16) -> I32F32 { @@ -700,45 +689,39 @@ impl Pallet { } pub fn get_normalized_stake(netuid: u16) -> Vec { - let n: usize = Self::get_subnetwork_n(netuid) as usize; - let mut stake_64: Vec = vec![I64F64::from_num(0.0); n]; - for neuron_uid in 0..n { - stake_64[neuron_uid] = I64F64::from_num(Self::get_stake_for_uid_and_subnetwork( - netuid, - neuron_uid as u16, - )); - } + let n = Self::get_subnetwork_n(netuid); + let mut stake_64: Vec = (0..n).map(|neuron_uid| I64F64::from_num( + Self::get_stake_for_uid_and_subnetwork(netuid, neuron_uid), + )) + .collect(); inplace_normalize_64(&mut stake_64); let stake: Vec = vec_fixed64_to_fixed32(stake_64); stake } pub fn get_block_at_registration(netuid: u16) -> Vec { - let n: usize = Self::get_subnetwork_n(netuid) as usize; - let mut block_at_registration: Vec = vec![0; n]; - for neuron_uid in 0..n { - if Keys::::contains_key(netuid, neuron_uid as u16) { - block_at_registration[neuron_uid] = - Self::get_neuron_block_at_registration(netuid, neuron_uid as u16); + let n = Self::get_subnetwork_n(netuid); + let block_at_registration: Vec = (0..n).map(|neuron_uid| { + if Keys::::contains_key(netuid, neuron_uid) { + Self::get_neuron_block_at_registration(netuid, neuron_uid) + } else { + 0 } - } + }) + .collect(); block_at_registration } // Output unnormalized sparse weights, input weights are assumed to be row max-upscaled in u16. + #[allow(clippy::indexing_slicing)] pub fn get_weights_sparse(netuid: u16) -> Vec> { let n: usize = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![]; n]; for (uid_i, weights_i) in as IterableStorageDoubleMap>>::iter_prefix(netuid) + .filter(|(uid_i, _)| *uid_i < n as u16) { - if uid_i >= n as u16 { - continue; - } - for (uid_j, weight_ij) in weights_i.iter() { - if *uid_j >= n as u16 { - continue; - } + for (uid_j, weight_ij) in weights_i.iter().filter(|(uid_j, _)| *uid_j < n as u16) { weights[uid_i as usize].push((*uid_j, I32F32::from_num(*weight_ij))); } } @@ -746,42 +729,45 @@ impl Pallet { } // Output unnormalized weights in [n, n] matrix, input weights are assumed to be row max-upscaled in u16. + #[allow(clippy::indexing_slicing)] pub fn get_weights(netuid: u16) -> Vec> { let n: usize = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![I32F32::from_num(0.0); n]; n]; for (uid_i, weights_i) in as IterableStorageDoubleMap>>::iter_prefix(netuid) { - for (uid_j, weight_ij) in weights_i.iter() { - weights[uid_i as usize][*uid_j as usize] = I32F32::from_num(*weight_ij); + for (uid_j, weight_ij) in weights_i { + weights[uid_i as usize][uid_j as usize] = I32F32::from_num(weight_ij); } } weights } // Output unnormalized sparse bonds, input bonds are assumed to be column max-upscaled in u16. 
+ #[allow(clippy::indexing_slicing)] pub fn get_bonds_sparse(netuid: u16) -> Vec> { let n: usize = Self::get_subnetwork_n(netuid) as usize; let mut bonds: Vec> = vec![vec![]; n]; for (uid_i, bonds_i) in as IterableStorageDoubleMap>>::iter_prefix(netuid) { - for (uid_j, bonds_ij) in bonds_i.iter() { - bonds[uid_i as usize].push((*uid_j, I32F32::from_num(*bonds_ij))); + for (uid_j, bonds_ij) in bonds_i { + bonds[uid_i as usize].push((uid_j, I32F32::from_num(bonds_ij))); } } bonds } // Output unnormalized bonds in [n, n] matrix, input bonds are assumed to be column max-upscaled in u16. + #[allow(clippy::indexing_slicing)] pub fn get_bonds(netuid: u16) -> Vec> { let n: usize = Self::get_subnetwork_n(netuid) as usize; let mut bonds: Vec> = vec![vec![I32F32::from_num(0.0); n]; n]; for (uid_i, bonds_i) in as IterableStorageDoubleMap>>::iter_prefix(netuid) { - for (uid_j, bonds_ij) in bonds_i.iter() { - bonds[uid_i as usize][*uid_j as usize] = I32F32::from_num(*bonds_ij); + for (uid_j, bonds_ij) in bonds_i { + bonds[uid_i as usize][uid_j as usize] = I32F32::from_num(bonds_ij); } } bonds diff --git a/pallets/subtensor/src/math.rs b/pallets/subtensor/src/math.rs index 30633fc4f..362530ef7 100644 --- a/pallets/subtensor/src/math.rs +++ b/pallets/subtensor/src/math.rs @@ -54,22 +54,22 @@ pub fn fixed_proportion_to_u16(x: I32F32) -> u16 { #[allow(dead_code)] pub fn vec_fixed32_to_u64(vec: Vec) -> Vec { - vec.into_iter().map(|e| fixed_to_u64(e)).collect() + vec.into_iter().map(fixed_to_u64).collect() } #[allow(dead_code)] pub fn vec_fixed64_to_fixed32(vec: Vec) -> Vec { - vec.into_iter().map(|e| fixed64_to_fixed32(e)).collect() + vec.into_iter().map(fixed64_to_fixed32).collect() } #[allow(dead_code)] pub fn vec_fixed32_to_fixed64(vec: Vec) -> Vec { - vec.into_iter().map(|e| fixed32_to_fixed64(e)).collect() + vec.into_iter().map(fixed32_to_fixed64).collect() } #[allow(dead_code)] pub fn vec_fixed64_to_u64(vec: Vec) -> Vec { - vec.into_iter().map(|e| fixed64_to_u64(e)).collect() + vec.into_iter().map(fixed64_to_u64).collect() } #[allow(dead_code)] @@ -135,14 +135,7 @@ pub fn check_vec_max_limited(vec: &Vec, max_limit: u16) -> bool { let mut vec_fixed: Vec = vec.iter().map(|e: &u16| I32F32::from_num(*e)).collect(); inplace_normalize(&mut vec_fixed); let max_value: Option<&I32F32> = vec_fixed.iter().max(); - match max_value { - Some(val) => { - return *val <= max_limit_fixed; - } - None => { - return true; - } - } + max_value.map_or(true, |v| *v <= max_limit_fixed) } #[allow(dead_code)] @@ -158,12 +151,11 @@ pub fn checked_sum(x: &Vec) -> Option where T: Copy + Default + CheckedAdd, { - if x.len() == 0 { + let mut iter = x.iter(); + let Some(mut sum) = iter.next().copied() else { return Some(T::default()); - } - - let mut sum: T = x[0]; - for i in x[1..].iter() { + }; + while let Some(i) = iter.next() { match sum.checked_add(i) { Some(val) => sum = val, None => return None, @@ -220,7 +212,7 @@ pub fn sigmoid_safe(input: I32F32, rho: I32F32, kappa: I32F32) -> I32F32 { } // Returns a bool vector where an item is true if the vector item is in topk values. 
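// Editor's aside (not part of the patch): a minimal, standalone sketch of the
// pattern this series applies throughout epoch.rs and math.rs — replacing
// `for i in 0..n { dst[i] = f(src[i]) }` loops with iterator adapters so that
// `clippy::indexing_slicing` has nothing left to flag. The names `stakes`,
// `weights`, and `weighted` are illustrative only.
fn weighted(stakes: &[u64], weights: &[u64]) -> Vec<u64> {
    stakes
        .iter()
        .zip(weights)
        .map(|(stake, weight)| stake.saturating_mul(*weight))
        .collect()
}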
-#[allow(dead_code)] +#[allow(dead_code, clippy::indexing_slicing)] pub fn is_topk(vector: &Vec, k: usize) -> Vec { let n: usize = vector.len(); let mut result: Vec = vec![true; n]; @@ -229,7 +221,7 @@ pub fn is_topk(vector: &Vec, k: usize) -> Vec { } let mut idxs: Vec = (0..n).collect(); idxs.sort_by_key(|&idx| &vector[idx]); // ascending stable sort - for &idx in &idxs[0..(n - k)] { + for &idx in idxs.iter().take(n - k) { result[idx] = false; } result @@ -253,9 +245,7 @@ pub fn inplace_normalize(x: &mut Vec) { if x_sum == I32F32::from_num(0.0 as f32) { return; } - for i in 0..x.len() { - x[i] = x[i] / x_sum; - } + x.into_iter().for_each(|elem| *elem /= x_sum); } // Normalizes (sum to 1 except 0) the input vector directly in-place, using the sum arg. @@ -264,9 +254,7 @@ pub fn inplace_normalize_using_sum(x: &mut Vec, x_sum: I32F32) { if x_sum == I32F32::from_num(0.0 as f32) { return; } - for i in 0..x.len() { - x[i] = x[i] / x_sum; - } + x.into_iter().for_each(|elem| *elem /= x_sum); } // Normalizes (sum to 1 except 0) the I64F64 input vector directly in-place. @@ -276,32 +264,30 @@ pub fn inplace_normalize_64(x: &mut Vec) { if x_sum == I64F64::from_num(0) { return; } - for i in 0..x.len() { - x[i] = x[i] / x_sum; - } + x.into_iter().for_each(|value| *value /= x_sum); } /// Returns x / y for input vectors x and y, if y == 0 return 0. #[allow(dead_code)] pub fn vecdiv(x: &Vec, y: &Vec) -> Vec { assert_eq!(x.len(), y.len()); - let n = x.len(); - let mut result: Vec = vec![I32F32::from_num(0); n]; - for i in 0..n { - if y[i] != 0 { - result[i] = x[i] / y[i]; + x.iter().zip(y).map(|(x_i, y_i)| { + if *y_i != 0 { + x_i / y_i + } else { + I32F32::from_num(0) } - } - result + }) + .collect() } // Normalizes (sum to 1 except 0) each row (dim=0) of a matrix in-place. #[allow(dead_code)] pub fn inplace_row_normalize(x: &mut Vec>) { - for i in 0..x.len() { - let row_sum: I32F32 = x[i].iter().sum(); + for row in x { + let row_sum: I32F32 = row.iter().sum(); if row_sum > I32F32::from_num(0.0 as f32) { - x[i].iter_mut() + row.into_iter() .for_each(|x_ij: &mut I32F32| *x_ij /= row_sum); } } @@ -323,61 +309,41 @@ pub fn inplace_row_normalize_sparse(sparse_matrix: &mut Vec>) // Sum across each row (dim=0) of a matrix. #[allow(dead_code)] pub fn row_sum(x: &Vec>) -> Vec { - if x.len() == 0 { - return vec![]; - } - if x[0].len() == 0 { - return vec![]; - } - let rows = x.len(); - let mut result: Vec = vec![I32F32::from_num(0); rows]; - for i in 0..x.len() { - for j in 0..x[i].len() { - result[i] += x[i][j]; + if let Some(first_row) = x.first() { + if first_row.is_empty() { + return vec![]; } } - result + x.into_iter().map(|row| row.into_iter().sum()).collect() } // Sum across each row (dim=0) of a sparse matrix. #[allow(dead_code)] pub fn row_sum_sparse(sparse_matrix: &Vec>) -> Vec { - let rows = sparse_matrix.len(); - let mut result: Vec = vec![I32F32::from_num(0); rows]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (_j, value) in sparse_row.iter() { - result[i] += value; - } - } - result + sparse_matrix.into_iter().map(|row| row.into_iter().map(|(_, value)| value).sum()).collect() } // Sum across each column (dim=1) of a matrix. 
#[allow(dead_code)] pub fn col_sum(x: &Vec>) -> Vec { - if x.len() == 0 { + let Some(first_row) = x.first() else { return vec![]; - } - if x[0].len() == 0 { + }; + let cols = first_row.len(); + if cols == 0 { return vec![]; } - let cols = x[0].len(); - let mut result: Vec = vec![I32F32::from_num(0); cols]; - for i in 0..x.len() { - assert_eq!(x[i].len(), cols); - for j in 0..cols { - result[j] += x[i][j]; - } - } - result + x.into_iter().fold(vec![I32F32::from_num(0); cols], |acc, next_row| { + acc.into_iter().zip(next_row).map(|(acc_elem, next_elem)| acc_elem + next_elem).collect() + }) } // Sum across each column (dim=1) of a sparse matrix. -#[allow(dead_code)] +#[allow(dead_code, clippy::indexing_slicing)] pub fn col_sum_sparse(sparse_matrix: &Vec>, columns: u16) -> Vec { let mut result: Vec = vec![I32F32::from_num(0); columns as usize]; - for sparse_row in sparse_matrix.iter() { - for (j, value) in sparse_row.iter() { + for sparse_row in sparse_matrix { + for (j, value) in sparse_row { result[*j as usize] += value; } } @@ -385,7 +351,7 @@ pub fn col_sum_sparse(sparse_matrix: &Vec>, columns: u16) -> } // Normalizes (sum to 1 except 0) each column (dim=1) of a sparse matrix in-place. -#[allow(dead_code)] +#[allow(dead_code, clippy::indexing_slicing)] pub fn inplace_col_normalize_sparse(sparse_matrix: &mut Vec>, columns: u16) { let mut col_sum: Vec = vec![I32F32::from_num(0.0); columns as usize]; // assume square matrix, rows=cols for sparse_row in sparse_matrix.iter() { @@ -393,8 +359,8 @@ pub fn inplace_col_normalize_sparse(sparse_matrix: &mut Vec>, col_sum[*j as usize] += value; } } - for sparse_row in sparse_matrix.iter_mut() { - for (j, value) in sparse_row.iter_mut() { + for sparse_row in sparse_matrix { + for (j, value) in sparse_row { if col_sum[*j as usize] == I32F32::from_num(0.0 as f32) { continue; } @@ -406,32 +372,28 @@ pub fn inplace_col_normalize_sparse(sparse_matrix: &mut Vec>, // Normalizes (sum to 1 except 0) each column (dim=1) of a matrix in-place. #[allow(dead_code)] pub fn inplace_col_normalize(x: &mut Vec>) { - if x.len() == 0 { + let Some(first_row) = x.first() else { return; - } - if x[0].len() == 0 { + }; + if first_row.is_empty() { return; } - let cols = x[0].len(); - let mut col_sum: Vec = vec![I32F32::from_num(0.0); cols]; - for i in 0..x.len() { - assert_eq!(x[i].len(), cols); - for j in 0..cols { - col_sum[j] += x[i][j]; - } - } - for j in 0..cols { - if col_sum[j] == I32F32::from_num(0.0 as f32) { - continue; - } - for i in 0..x.len() { - x[i][j] /= col_sum[j]; - } - } + let cols = first_row.len(); + let col_sums = x.into_iter().fold(vec![I32F32::from_num(0.0); cols], |acc, row| { + row.into_iter().zip(acc).map(|(&mut m_val, acc_val)| { + acc_val + m_val + }) + .collect() + }); + x.into_iter().for_each(|row| { + row.into_iter().zip(&col_sums).filter(|(_, col_sum)| **col_sum != I32F32::from_num(0)).for_each(|(m_val, col_sum)| { + *m_val /= col_sum; + }); + }); } // Max-upscale each column (dim=1) of a sparse matrix in-place. 
-#[allow(dead_code)] +#[allow(dead_code, clippy::indexing_slicing)] pub fn inplace_col_max_upscale_sparse(sparse_matrix: &mut Vec>, columns: u16) { let mut col_max: Vec = vec![I32F32::from_num(0.0); columns as usize]; // assume square matrix, rows=cols for sparse_row in sparse_matrix.iter() { @@ -441,8 +403,8 @@ pub fn inplace_col_max_upscale_sparse(sparse_matrix: &mut Vec } } } - for sparse_row in sparse_matrix.iter_mut() { - for (j, value) in sparse_row.iter_mut() { + for sparse_row in sparse_matrix { + for (j, value) in sparse_row { if col_max[*j as usize] == I32F32::from_num(0.0 as f32) { continue; } @@ -454,98 +416,91 @@ pub fn inplace_col_max_upscale_sparse(sparse_matrix: &mut Vec // Max-upscale each column (dim=1) of a matrix in-place. #[allow(dead_code)] pub fn inplace_col_max_upscale(x: &mut Vec>) { - if x.len() == 0 { + let Some(first_row) = x.first() else { return; - } - if x[0].len() == 0 { + }; + if first_row.is_empty() { return; } - let cols = x[0].len(); - let mut col_max: Vec = vec![I32F32::from_num(0.0); cols]; - for i in 0..x.len() { - assert_eq!(x[i].len(), cols); - for j in 0..cols { - if col_max[j] < x[i][j] { - col_max[j] = x[i][j]; - } - } - } - for j in 0..cols { - if col_max[j] == I32F32::from_num(0.0 as f32) { - continue; - } - for i in 0..x.len() { - x[i][j] /= col_max[j]; - } - } + let cols = first_row.len(); + let col_maxes = x.into_iter().fold(vec![I32F32::from_num(0); cols], |acc, row| { + row.into_iter().zip(acc).map(|(m_val, acc_val)| { + acc_val.max(*m_val) + }) + .collect() + }); + x.into_iter().for_each(|row| { + row.into_iter().zip(&col_maxes).filter(|(_, col_max)| **col_max != I32F32::from_num(0)).for_each(|(m_val, col_max)| { + *m_val /= col_max; + }); + }); } // Apply mask to vector, mask=true will mask out, i.e. set to 0. #[allow(dead_code)] pub fn inplace_mask_vector(mask: &Vec, vector: &mut Vec) { - if mask.len() == 0 { + if mask.is_empty() { return; } assert_eq!(mask.len(), vector.len()); let zero: I32F32 = I32F32::from_num(0.0); - for i in 0..mask.len() { - if mask[i] { - vector[i] = zero; - } - } + mask.into_iter().zip(vector).filter(|(m, _)| **m).for_each(|(_, v_elem)| { + *v_elem = zero; + }); } // Apply mask to matrix, mask=true will mask out, i.e. set to 0. #[allow(dead_code)] pub fn inplace_mask_matrix(mask: &Vec>, matrix: &mut Vec>) { - if mask.len() == 0 { + let Some(first_row) = mask.first() else { return; - } - if mask[0].len() == 0 { + }; + if first_row.is_empty() { return; } assert_eq!(mask.len(), matrix.len()); let zero: I32F32 = I32F32::from_num(0.0); - for i in 0..mask.len() { - for j in 0..mask[i].len() { - if mask[i][j] { - matrix[i][j] = zero; - } - } - } + mask.into_iter().zip(matrix).for_each(|(mask_row, matrix_row)| { + mask_row.into_iter().zip(matrix_row).filter(|(mask_elem, _)| **mask_elem).for_each(|(_, matrix_elem)| { + *matrix_elem = zero; + }); + }); } // Apply row mask to matrix, mask=true will mask out, i.e. set to 0. #[allow(dead_code)] pub fn inplace_mask_rows(mask: &Vec, matrix: &mut Vec>) { - let rows = matrix.len(); - if rows == 0 { + let Some(first_row) = matrix.first() else { return; - } - let cols = matrix[0].len(); - assert_eq!(mask.len(), rows); + }; + let cols = first_row.len(); + assert_eq!(mask.len(), matrix.len()); let zero: I32F32 = I32F32::from_num(0); - for i in 0..rows { - if mask[i] { - matrix[i] = vec![zero; cols]; + matrix.into_iter().zip(mask).for_each(|(row_elem, mask_row)| { + if *mask_row { + *row_elem = vec![zero; cols]; } - } + }); } // Mask out the diagonal of the input matrix in-place. 
#[allow(dead_code)] pub fn inplace_mask_diag(matrix: &mut Vec>) { - if matrix.len() == 0 { + let Some(first_row) = matrix.first() else { return; - } - if matrix[0].len() == 0 { + }; + if first_row.is_empty() { return; } - assert_eq!(matrix.len(), matrix[0].len()); + assert_eq!(matrix.len(), first_row.len()); let zero: I32F32 = I32F32::from_num(0.0); - for i in 0..matrix.len() { - matrix[i][i] = zero; - } + matrix.into_iter().enumerate().for_each(|(idx, row)| { + let Some(elem) = row.into_iter().nth(idx) else { + // Should not happen since matrix is square + return; + }; + *elem = zero; + }); } // Return a new sparse matrix that replaces masked rows with an empty vector placeholder. @@ -554,34 +509,28 @@ pub fn mask_rows_sparse( mask: &Vec, sparse_matrix: &Vec>, ) -> Vec> { - let n: usize = sparse_matrix.len(); - assert_eq!(n, mask.len()); - let mut result: Vec> = vec![vec![]; n]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - if !mask[i] { - result[i] = sparse_row.clone(); + assert_eq!(sparse_matrix.len(), mask.len()); + mask.into_iter().zip(sparse_matrix).map(|(mask_elem, sparse_row)| { + if *mask_elem { + vec![] + } else { + sparse_row.clone() } - } - result + }) + .collect() } // Return a new sparse matrix with a masked out diagonal of input sparse matrix. #[allow(dead_code)] pub fn mask_diag_sparse(sparse_matrix: &Vec>) -> Vec> { - let n: usize = sparse_matrix.len(); - let mut result: Vec> = vec![vec![]; n]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row.iter() { - if i != (*j as usize) { - result[i].push((*j, *value)); - } - } - } - result + sparse_matrix.into_iter().enumerate().map(|(i, sparse_row)| { + sparse_row.into_iter().filter(|(j, _)| i != (*j as usize)).copied().collect() + }) + .collect() } // Remove cells from sparse matrix where the mask function of two vectors is true. -#[allow(dead_code)] +#[allow(dead_code, clippy::indexing_slicing)] pub fn vec_mask_sparse_matrix( sparse_matrix: &Vec>, first_vector: &Vec, @@ -591,7 +540,7 @@ pub fn vec_mask_sparse_matrix( let n: usize = sparse_matrix.len(); let mut result: Vec> = vec![vec![]; n]; for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row.iter() { + for (j, value) in sparse_row { if !mask_fn(first_vector[i], second_vector[*j as usize]) { result[i].push((*j, *value)); } @@ -603,20 +552,17 @@ pub fn vec_mask_sparse_matrix( // Row-wise matrix-vector hadamard product. #[allow(dead_code)] pub fn row_hadamard(matrix: &Vec>, vector: &Vec) -> Vec> { - if matrix.len() == 0 { + let Some(first_row) = matrix.first() else { return vec![vec![]]; - } - if matrix[0].len() == 0 { + }; + let cols = first_row.len(); + if cols == 0 { return vec![vec![]]; } - let mut result: Vec> = - vec![vec![I32F32::from_num(0.0); matrix[0].len()]; matrix.len()]; - for i in 0..matrix.len() { - for j in 0..matrix[i].len() { - result[i][j] = vector[i] * matrix[i][j]; - } - } - result + matrix.into_iter().zip(vector).map(|(row, vec_val)| { + row.into_iter().map(|m_val| vec_val * m_val).collect() + }) + .collect() } // Row-wise sparse matrix-vector hadamard product. 
@@ -625,83 +571,76 @@ pub fn row_hadamard_sparse( sparse_matrix: &Vec>, vector: &Vec, ) -> Vec> { - let mut result: Vec> = sparse_matrix.clone(); - for (i, sparse_row) in result.iter_mut().enumerate() { - for (_j, value) in sparse_row.iter_mut() { - *value *= vector[i]; - } - } - result + sparse_matrix.into_iter().zip(vector).map(|(sparse_row, vec_val)| { + sparse_row.into_iter().map(|(j, value)| (*j, *value * *vec_val)).collect() + }) + .collect() } // Row-wise matrix-vector product, column-wise sum: result_j = SUM(i) vector_i * matrix_ij. #[allow(dead_code)] pub fn matmul(matrix: &Vec>, vector: &Vec) -> Vec { - if matrix.len() == 0 { + let Some(first_row) = matrix.first() else { return vec![]; - } - if matrix[0].len() == 0 { + }; + let cols = first_row.len(); + if cols == 0 { return vec![]; } assert!(matrix.len() == vector.len()); - let mut result: Vec = vec![I32F32::from_num(0.0); matrix[0].len()]; - for i in 0..matrix.len() { - for j in 0..matrix[i].len() { - // Compute ranks: r_j = SUM(i) w_ij * s_i - // Compute trust scores: t_j = SUM(i) w_ij * s_i - // result_j = SUM(i) vector_i * matrix_ij - result[j] += vector[i] * matrix[i][j]; - } - } - result + matrix.into_iter().zip(vector).fold(vec![I32F32::from_num(0); cols], |acc, (row, vec_val)| { + row.into_iter().zip(acc).map(|(m_val, acc_val)| { + acc_val + vec_val * m_val + }) + .collect() + }) } // Row-wise matrix-vector product, column-wise sum: result_j = SUM(i) vector_i * matrix_ij. #[allow(dead_code)] pub fn matmul_64(matrix: &Vec>, vector: &Vec) -> Vec { - if matrix.len() == 0 { + let Some(first_row) = matrix.first() else { return vec![]; - } - if matrix[0].len() == 0 { + }; + let cols = first_row.len(); + if cols == 0 { return vec![]; } assert!(matrix.len() == vector.len()); - let mut result: Vec = vec![I64F64::from_num(0.0); matrix[0].len()]; - for i in 0..matrix.len() { - for j in 0..matrix[i].len() { + matrix.into_iter().zip(vector).fold(vec![I64F64::from_num(0.0); cols], |acc, (row, vec_val)| { + row.into_iter().zip(acc).map(|(m_val, acc_val)| { // Compute ranks: r_j = SUM(i) w_ij * s_i // Compute trust scores: t_j = SUM(i) w_ij * s_i // result_j = SUM(i) vector_i * matrix_ij - result[j] += vector[i] * matrix[i][j]; - } - } - result + acc_val + vec_val * m_val + }) + .collect() + }) } // Column-wise matrix-vector product, row-wise sum: result_i = SUM(j) vector_j * matrix_ij. #[allow(dead_code)] pub fn matmul_transpose(matrix: &Vec>, vector: &Vec) -> Vec { - if matrix.len() == 0 { + let Some(first_row) = matrix.first() else { return vec![]; - } - if matrix[0].len() == 0 { + }; + if first_row.is_empty() { return vec![]; } - assert!(matrix[0].len() == vector.len()); - let mut result: Vec = vec![I32F32::from_num(0.0); matrix.len()]; - for i in 0..matrix.len() { - for j in 0..matrix[i].len() { + assert!(first_row.len() == vector.len()); + matrix.into_iter().map(|row| { + row.into_iter().zip(vector).fold(I32F32::from_num(0), |acc, (velem, melem)| { // Compute dividends: d_j = SUM(i) b_ji * inc_i // result_j = SUM(i) vector_i * matrix_ji // result_i = SUM(j) vector_j * matrix_ij - result[i] += vector[j] * matrix[i][j]; - } - } - result + acc + (velem * melem) + }) + }) + .collect() } // Row-wise sparse_matrix-vector product, column-wise sum: result_j = SUM(i) vector_i * matrix_ij. 
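// Editor's aside (not part of the patch): the refactored `matmul` above builds
// the column sums with `fold` + `zip` instead of
// `result[j] += vector[i] * matrix[i][j]`. A standalone sketch of the same
// shape, using plain f64 in place of I32F32 so it compiles without
// substrate_fixed; the function name is hypothetical.
fn matmul_sketch(matrix: &[Vec<f64>], vector: &[f64]) -> Vec<f64> {
    let cols = matrix.first().map_or(0, |row| row.len());
    matrix
        .iter()
        .zip(vector)
        .fold(vec![0.0; cols], |acc, (row, v)| {
            // acc_j + v_i * m_ij, column by column, no direct indexing
            row.iter().zip(acc).map(|(m, a)| a + v * m).collect()
        })
}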
-#[allow(dead_code)] +#[allow(dead_code, clippy::indexing_slicing)] pub fn matmul_sparse( sparse_matrix: &Vec>, vector: &Vec, @@ -720,7 +659,7 @@ pub fn matmul_sparse( } // Column-wise sparse_matrix-vector product, row-wise sum: result_i = SUM(j) vector_j * matrix_ij. -#[allow(dead_code)] +#[allow(dead_code, clippy::indexing_slicing)] pub fn matmul_transpose_sparse( sparse_matrix: &Vec>, vector: &Vec, @@ -740,17 +679,15 @@ pub fn matmul_transpose_sparse( // Set inplace matrix values above column threshold to threshold value. #[allow(dead_code)] pub fn inplace_col_clip(x: &mut Vec>, col_threshold: &Vec) { - for i in 0..x.len() { - for j in 0..x[i].len() { - if x[i][j] > col_threshold[j] { - x[i][j] = col_threshold[j]; - } - } - } + x.into_iter().for_each(|row| { + row.into_iter().zip(col_threshold).for_each(|(value, threshold)| { + *value = *threshold.min(value); + }); + }); } // Return sparse matrix with values above column threshold set to threshold value. -#[allow(dead_code)] +#[allow(dead_code, clippy::indexing_slicing)] pub fn col_clip_sparse( sparse_matrix: &Vec>, col_threshold: &Vec, @@ -778,33 +715,31 @@ pub fn clip( upper: I32F32, lower: I32F32, ) -> Vec> { - // Check Nill length. - if x.len() == 0 { - return vec![vec![]]; - } - let mut result: Vec> = vec![vec![lower; x[0].len()]; x.len()]; - for i in 0..x.len() { - for j in 0..x[i].len() { - if x[i][j] >= threshold { - result[i][j] = upper; + x.into_iter().map(|row| { + row.into_iter().map(|elem| { + if *elem >= threshold { + upper + } else { + lower } - } - } - result + }) + .collect() + }) + .collect() } // Set inplace matrix values below threshold to lower, and equal-above to upper. #[allow(dead_code)] pub fn inplace_clip(x: &mut Vec>, threshold: I32F32, upper: I32F32, lower: I32F32) { - for i in 0..x.len() { - for j in 0..x[i].len() { - if x[i][j] >= threshold { - x[i][j] = upper; + x.into_iter().for_each(|row| { + row.into_iter().for_each(|elem| { + *elem = if *elem >= threshold { + upper } else { - x[i][j] = lower; - } - } - } + lower + }; + }); + }); } // Set sparse matrix values below threshold to lower, and equal-above to upper. @@ -816,17 +751,17 @@ pub fn clip_sparse( upper: I32F32, lower: I32F32, ) -> Vec> { - let mut result: Vec> = vec![vec![]; sparse_matrix.len()]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row.iter() { + sparse_matrix.into_iter().map(|row| { + row.into_iter().map(|(j, value)| { if *value < threshold { - result[i].push((*j, lower)); + (*j, lower) } else { - result[i].push((*j, upper)); + (*j, upper) } - } - } - result + }) + .collect() + }) + .collect() } // Stake-weighted median score finding algorithm, based on a mid pivot binary search. @@ -856,7 +791,7 @@ pub fn clip_sparse( // * 'median': ( I32F32 ): // - median via random pivot binary search. // -#[allow(dead_code)] +#[allow(dead_code, clippy::indexing_slicing)] pub fn weighted_median( stake: &Vec, score: &Vec, @@ -879,7 +814,7 @@ pub fn weighted_median( let mut hi_stake: I32F32 = I32F32::from_num(0); let mut lower: Vec = vec![]; let mut upper: Vec = vec![]; - for &idx in partition_idx.iter() { + for &idx in partition_idx { if score[idx] == pivot { continue; } @@ -916,7 +851,7 @@ pub fn weighted_median( } /// Column-wise weighted median, e.g. stake-weighted median scores per server (column) over all validators (rows). 
-#[allow(dead_code)] +#[allow(dead_code, clippy::indexing_slicing)] pub fn weighted_median_col( stake: &Vec, score: &Vec>, @@ -954,7 +889,7 @@ pub fn weighted_median_col( } /// Column-wise weighted median, e.g. stake-weighted median scores per server (column) over all validators (rows). -#[allow(dead_code)] +#[allow(dead_code, clippy::indexing_slicing)] pub fn weighted_median_col_sparse( stake: &Vec, score: &Vec>, @@ -997,24 +932,21 @@ pub fn weighted_median_col_sparse( #[allow(dead_code)] pub fn hadamard(mat1: &Vec>, mat2: &Vec>) -> Vec> { assert!(mat1.len() == mat2.len()); - if mat1.len() == 0 { + let Some(first_row) = mat1.first() else { return vec![vec![]; 1]; - } - if mat1[0].len() == 0 { + }; + if first_row.is_empty() { return vec![vec![]; 1]; } - let mut result: Vec> = vec![vec![I32F32::from_num(0); mat1[0].len()]; mat1.len()]; - for i in 0..mat1.len() { - assert!(mat1[i].len() == mat2[i].len()); - for j in 0..mat1[i].len() { - result[i][j] = mat1[i][j] * mat2[i][j]; - } - } - result + mat1.iter().zip(mat2).map(|(row1, row2)| { + assert!(row1.len() == row2.len()); + row1.iter().zip(row2).map(|(elem1, elem2)| elem1 * elem2).collect() + }) + .collect() } // Element-wise product of two sparse matrices. -#[allow(dead_code)] +#[allow(dead_code, clippy::indexing_slicing)] pub fn hadamard_sparse( mat1: &Vec>, mat2: &Vec>, @@ -1048,28 +980,26 @@ pub fn hadamard_sparse( // higher alpha discounts older observations faster. #[allow(dead_code)] pub fn mat_ema(new: &Vec>, old: &Vec>, alpha: I32F32) -> Vec> { - if new.len() == 0 { + let Some(first_row) = new.first() else { return vec![vec![]; 1]; - } - if new[0].len() == 0 { + }; + if first_row.is_empty() { return vec![vec![]; 1]; } let one_minus_alpha: I32F32 = I32F32::from_num(1.0) - alpha; - let mut result: Vec> = vec![vec![I32F32::from_num(0.0); new[0].len()]; new.len()]; - assert!(new.len() == old.len()); - for i in 0..new.len() { - assert!(new[i].len() == old[i].len()); - for j in 0..new[i].len() { - result[i][j] = alpha * new[i][j] + one_minus_alpha * old[i][j] - } - } - result + new.iter().zip(old).map(|(new_row, old_row)| { + new_row.iter().zip(old_row).map(|(new_elem, old_elem)| { + alpha * new_elem + one_minus_alpha * old_elem + }) + .collect() + }) + .collect() } // Return sparse matrix exponential moving average: `alpha * a_ij + one_minus_alpha * b_ij`. // `alpha` is the EMA coefficient, how much to add of the new observation, typically small, // higher alpha discounts older observations faster. -#[allow(dead_code)] +#[allow(dead_code, clippy::indexing_slicing)] pub fn mat_ema_sparse( new: &Vec>, old: &Vec>, @@ -1100,18 +1030,14 @@ pub fn mat_ema_sparse( // Return sparse matrix only with elements >= threshold of an input sparse matrix. 
#[allow(dead_code)] pub fn sparse_threshold(w: &Vec>, threshold: I32F32) -> Vec> { - let mut sparse_threshold_result: Vec> = vec![vec![]; w.len()]; - for (uid_i, weights_i) in w.iter().enumerate() { - for (uid_j, weight_ij) in weights_i.iter() { - if *weight_ij >= threshold { - sparse_threshold_result[uid_i as usize].push((*uid_j, *weight_ij)); - } - } - } - sparse_threshold_result + w.into_iter().map(|row| { + row.into_iter().filter(|(_, weight)| *weight >= threshold).copied().collect() + }) + .collect() } #[cfg(test)] +#[allow(clippy::indexing_slicing)] mod tests { use crate::math::*; use rand::{seq::SliceRandom, thread_rng, Rng}; diff --git a/pallets/subtensor/src/registration.rs b/pallets/subtensor/src/registration.rs index 63e4bdefe..91d5965c0 100644 --- a/pallets/subtensor/src/registration.rs +++ b/pallets/subtensor/src/registration.rs @@ -509,7 +509,7 @@ impl Pallet { .expect("convert u64 to block number."); let block_hash_at_number: ::Hash = system::Pallet::::block_hash(block_number); - let vec_hash: Vec = block_hash_at_number.as_ref().into_iter().cloned().collect(); + let vec_hash: Vec = block_hash_at_number.as_ref().into_iter().copied().collect(); let deref_vec_hash: &[u8] = &vec_hash; // c: &[u8] let real_hash: H256 = H256::from_slice(deref_vec_hash); @@ -526,138 +526,40 @@ impl Pallet { pub fn hash_to_vec(hash: H256) -> Vec { let hash_as_bytes: &[u8] = hash.as_bytes(); - let hash_as_vec: Vec = hash_as_bytes.iter().cloned().collect(); - return hash_as_vec; + let hash_as_vec: Vec = hash_as_bytes.iter().copied().collect(); + hash_as_vec } - pub fn hash_block_and_hotkey(block_hash_bytes: &[u8], hotkey: &T::AccountId) -> H256 { + #[allow(clippy::indexing_slicing)] + pub fn hash_block_and_hotkey(block_hash_bytes: &[u8; 32], hotkey: &T::AccountId) -> H256 { // Get the public key from the account id. let hotkey_pubkey: MultiAddress = MultiAddress::Id(hotkey.clone()); let binding = hotkey_pubkey.encode(); // Skip extra 0th byte. 
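// Editor's aside (not part of the patch): the rewritten `hash_block_and_hotkey`
// below fills a fixed 64-byte buffer with `split_at_mut` + `copy_from_slice`
// instead of spelling out a 64-element array literal. A standalone sketch of
// that buffer-building step; the helper name is hypothetical.
fn concat_hash_inputs(block_hash: &[u8; 32], hotkey: &[u8; 32]) -> [u8; 64] {
    let mut full = [0u8; 64];
    let (first_half, second_half) = full.split_at_mut(32);
    first_half.copy_from_slice(block_hash);
    second_half.copy_from_slice(hotkey);
    full
}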
let hotkey_bytes: &[u8] = binding[1..].as_ref(); - let full_bytes: &[u8; 64] = &[ - block_hash_bytes[0], - block_hash_bytes[1], - block_hash_bytes[2], - block_hash_bytes[3], - block_hash_bytes[4], - block_hash_bytes[5], - block_hash_bytes[6], - block_hash_bytes[7], - block_hash_bytes[8], - block_hash_bytes[9], - block_hash_bytes[10], - block_hash_bytes[11], - block_hash_bytes[12], - block_hash_bytes[13], - block_hash_bytes[14], - block_hash_bytes[15], - block_hash_bytes[16], - block_hash_bytes[17], - block_hash_bytes[18], - block_hash_bytes[19], - block_hash_bytes[20], - block_hash_bytes[21], - block_hash_bytes[22], - block_hash_bytes[23], - block_hash_bytes[24], - block_hash_bytes[25], - block_hash_bytes[26], - block_hash_bytes[27], - block_hash_bytes[28], - block_hash_bytes[29], - block_hash_bytes[30], - block_hash_bytes[31], - hotkey_bytes[0], - hotkey_bytes[1], - hotkey_bytes[2], - hotkey_bytes[3], - hotkey_bytes[4], - hotkey_bytes[5], - hotkey_bytes[6], - hotkey_bytes[7], - hotkey_bytes[8], - hotkey_bytes[9], - hotkey_bytes[10], - hotkey_bytes[11], - hotkey_bytes[12], - hotkey_bytes[13], - hotkey_bytes[14], - hotkey_bytes[15], - hotkey_bytes[16], - hotkey_bytes[17], - hotkey_bytes[18], - hotkey_bytes[19], - hotkey_bytes[20], - hotkey_bytes[21], - hotkey_bytes[22], - hotkey_bytes[23], - hotkey_bytes[24], - hotkey_bytes[25], - hotkey_bytes[26], - hotkey_bytes[27], - hotkey_bytes[28], - hotkey_bytes[29], - hotkey_bytes[30], - hotkey_bytes[31], - ]; - let keccak_256_seal_hash_vec: [u8; 32] = keccak_256(full_bytes); - let seal_hash: H256 = H256::from_slice(&keccak_256_seal_hash_vec); - - return seal_hash; + let mut full_bytes = [0u8; 64]; + let (first_half, second_half) = full_bytes.split_at_mut(32); + first_half.copy_from_slice(block_hash_bytes); + // Safe because Substrate guarantees that all AccountId types are at least 32 bytes + second_half.copy_from_slice(&hotkey_bytes[..32]); + let keccak_256_seal_hash_vec: [u8; 32] = keccak_256(&full_bytes[..]); + let seal_hash = H256::from_slice(&keccak_256_seal_hash_vec); + + seal_hash } pub fn create_seal_hash(block_number_u64: u64, nonce_u64: u64, hotkey: &T::AccountId) -> H256 { - let nonce = U256::from(nonce_u64); + let nonce = nonce_u64.to_be_bytes(); let block_hash_at_number: H256 = Self::get_block_hash_from_u64(block_number_u64); - let block_hash_bytes: &[u8] = block_hash_at_number.as_bytes(); + let block_hash_bytes: &[u8; 32] = block_hash_at_number.as_fixed_bytes(); let binding = Self::hash_block_and_hotkey(block_hash_bytes, hotkey); - let block_and_hotkey_hash_bytes: &[u8] = binding.as_bytes(); - - let full_bytes: &[u8; 40] = &[ - nonce.byte(0), - nonce.byte(1), - nonce.byte(2), - nonce.byte(3), - nonce.byte(4), - nonce.byte(5), - nonce.byte(6), - nonce.byte(7), - block_and_hotkey_hash_bytes[0], - block_and_hotkey_hash_bytes[1], - block_and_hotkey_hash_bytes[2], - block_and_hotkey_hash_bytes[3], - block_and_hotkey_hash_bytes[4], - block_and_hotkey_hash_bytes[5], - block_and_hotkey_hash_bytes[6], - block_and_hotkey_hash_bytes[7], - block_and_hotkey_hash_bytes[8], - block_and_hotkey_hash_bytes[9], - block_and_hotkey_hash_bytes[10], - block_and_hotkey_hash_bytes[11], - block_and_hotkey_hash_bytes[12], - block_and_hotkey_hash_bytes[13], - block_and_hotkey_hash_bytes[14], - block_and_hotkey_hash_bytes[15], - block_and_hotkey_hash_bytes[16], - block_and_hotkey_hash_bytes[17], - block_and_hotkey_hash_bytes[18], - block_and_hotkey_hash_bytes[19], - block_and_hotkey_hash_bytes[20], - block_and_hotkey_hash_bytes[21], - 
block_and_hotkey_hash_bytes[22], - block_and_hotkey_hash_bytes[23], - block_and_hotkey_hash_bytes[24], - block_and_hotkey_hash_bytes[25], - block_and_hotkey_hash_bytes[26], - block_and_hotkey_hash_bytes[27], - block_and_hotkey_hash_bytes[28], - block_and_hotkey_hash_bytes[29], - block_and_hotkey_hash_bytes[30], - block_and_hotkey_hash_bytes[31], - ]; - let sha256_seal_hash_vec: [u8; 32] = sha2_256(full_bytes); + let block_and_hotkey_hash_bytes: &[u8; 32] = binding.as_fixed_bytes(); + + let mut full_bytes = [0u8; 40]; + let (first_chunk, second_chunk) = full_bytes.split_at_mut(8); + first_chunk.copy_from_slice(&nonce); + second_chunk.copy_from_slice(block_and_hotkey_hash_bytes); + let sha256_seal_hash_vec: [u8; 32] = sha2_256(&full_bytes[..]); let keccak_256_seal_hash_vec: [u8; 32] = keccak_256(&sha256_seal_hash_vec); let seal_hash: H256 = H256::from_slice(&keccak_256_seal_hash_vec); diff --git a/pallets/subtensor/src/root.rs b/pallets/subtensor/src/root.rs index 6393fce63..37d1b56c9 100644 --- a/pallets/subtensor/src/root.rs +++ b/pallets/subtensor/src/root.rs @@ -222,9 +222,9 @@ impl Pallet { log::error!("set_emission_values: netuids.len() != emission.len()"); return Err("netuids and emission must have the same length"); } - for (i, netuid_i) in netuids.iter().enumerate() { - log::debug!("set netuid:{:?} emission:{:?}", netuid_i, emission[i]); - EmissionValues::::insert(*netuid_i, emission[i]); + for (netuid_i, emission_i) in netuids.iter().zip(emission) { + log::debug!("set netuid:{:?} emission:{:?}", netuid_i, emission_i); + EmissionValues::::insert(*netuid_i, emission_i); } Ok(()) } @@ -261,13 +261,11 @@ impl Pallet { // --- 4. Iterate over each weight entry in `weights_i` to update the corresponding value in the // initialized `weights` 2D vector. Here, `uid_j` represents a subnet, and `weight_ij` is the // weight of `uid_i` with respect to `uid_j`. - for (netuid, weight_ij) in weights_i.iter() { - let option = subnet_list.iter().position(|item| item == netuid); - + for (netuid, weight_ij) in &weights_i { let idx = uid_i as usize; if let Some(weight) = weights.get_mut(idx) { - if let Some(netuid_idx) = option { - weight[netuid_idx] = I64F64::from_num(*weight_ij); + if let Some((w, _)) = weight.into_iter().zip(&subnet_list).find(|(_, subnet)| *subnet == netuid) { + *w = I64F64::from_num(*weight_ij); } } } @@ -337,8 +335,8 @@ impl Pallet { // --- 6. Retrieves and stores the stake value associated with each hotkey on the root network. // Stakes are stored in a 64-bit fixed point representation for precise calculations. 
let mut stake_i64: Vec = vec![I64F64::from_num(0.0); n as usize]; - for (uid_i, hotkey) in hotkeys.iter() { - stake_i64[*uid_i as usize] = I64F64::from_num(Self::get_total_stake_for_hotkey(hotkey)); + for ((_, hotkey), stake) in hotkeys.iter().zip(&mut stake_i64) { + *stake = I64F64::from_num(Self::get_total_stake_for_hotkey(hotkey)); } inplace_normalize_64(&mut stake_i64); log::debug!("S:\n{:?}\n", &stake_i64); @@ -358,12 +356,11 @@ impl Pallet { let total_networks = Self::get_num_subnets(); let mut trust = vec![I64F64::from_num(0); total_networks as usize]; let mut total_stake: I64F64 = I64F64::from_num(0); - for (idx, weights) in weights.iter().enumerate() { - let hotkey_stake = stake_i64[idx]; + for (weights, hotkey_stake) in weights.iter().zip(stake_i64) { total_stake += hotkey_stake; - for (weight_idx, weight) in weights.iter().enumerate() { + for (weight, trust_score) in weights.iter().zip(&mut trust) { if *weight > 0 { - trust[weight_idx] += hotkey_stake; + *trust_score += hotkey_stake; } } } @@ -389,20 +386,20 @@ impl Pallet { log::debug!("T:\n{:?}\n", &trust); let one = I64F64::from_num(1); let mut consensus = vec![I64F64::from_num(0); total_networks as usize]; - for (idx, trust_score) in trust.iter_mut().enumerate() { + for (trust_score, consensus_i) in trust.iter_mut().zip(&mut consensus) { let shifted_trust = *trust_score - I64F64::from_num(Self::get_float_kappa(0)); // Range( -kappa, 1 - kappa ) let temperatured_trust = shifted_trust * I64F64::from_num(Self::get_rho(0)); // Range( -rho * kappa, rho ( 1 - kappa ) ) let exponentiated_trust: I64F64 = substrate_fixed::transcendental::exp(-temperatured_trust) .expect("temperatured_trust is on range( -rho * kappa, rho ( 1 - kappa ) )"); - consensus[idx] = one / (one + exponentiated_trust); + *consensus_i = one / (one + exponentiated_trust); } log::debug!("C:\n{:?}\n", &consensus); let mut weighted_emission = vec![I64F64::from_num(0); total_networks as usize]; - for (idx, emission) in weighted_emission.iter_mut().enumerate() { - *emission = consensus[idx] * ranks[idx]; + for ((emission, consensus_i), rank) in weighted_emission.iter_mut().zip(&consensus).zip(&ranks) { + *emission = *consensus_i * (*rank); } inplace_normalize_64(&mut weighted_emission); log::debug!("Ei64:\n{:?}\n", &weighted_emission); diff --git a/pallets/subtensor/src/serving.rs b/pallets/subtensor/src/serving.rs index 39013642f..1982026bb 100644 --- a/pallets/subtensor/src/serving.rs +++ b/pallets/subtensor/src/serving.rs @@ -276,7 +276,7 @@ impl Pallet { } pub fn is_valid_ip_type(ip_type: u8) -> bool { - let allowed_values: Vec = vec![4, 6]; + let allowed_values = [4, 6]; return allowed_values.contains(&ip_type); } diff --git a/pallets/subtensor/src/utils.rs b/pallets/subtensor/src/utils.rs index b329c51db..a917c0bce 100644 --- a/pallets/subtensor/src/utils.rs +++ b/pallets/subtensor/src/utils.rs @@ -102,17 +102,19 @@ impl Pallet { // ================================== pub fn set_last_update_for_uid(netuid: u16, uid: u16, last_update: u64) { let mut updated_last_update_vec = Self::get_last_update(netuid); - if (uid as usize) < updated_last_update_vec.len() { - updated_last_update_vec[uid as usize] = last_update; - LastUpdate::::insert(netuid, updated_last_update_vec); - } + let Some(updated_last_update) = updated_last_update_vec.get_mut(uid as usize) else { + return; + }; + *updated_last_update = last_update; + LastUpdate::::insert(netuid, updated_last_update_vec); } pub fn set_active_for_uid(netuid: u16, uid: u16, active: bool) { let mut updated_active_vec = 
Self::get_active(netuid); - if (uid as usize) < updated_active_vec.len() { - updated_active_vec[uid as usize] = active; - Active::::insert(netuid, updated_active_vec); - } + let Some(updated_active) = updated_active_vec.get_mut(uid as usize) else { + return; + }; + *updated_active = active; + Active::::insert(netuid, updated_active_vec); } pub fn set_pruning_score_for_uid(netuid: u16, uid: u16, pruning_score: u16) { log::info!("netuid = {:?}", netuid); @@ -122,14 +124,19 @@ impl Pallet { ); log::info!("uid = {:?}", uid); assert!(uid < SubnetworkN::::get(netuid)); - PruningScores::::mutate(netuid, |v| v[uid as usize] = pruning_score); + PruningScores::::mutate(netuid, |v| { + if let Some(s) = v.get_mut(uid as usize) { + *s = pruning_score; + } + }); } pub fn set_validator_permit_for_uid(netuid: u16, uid: u16, validator_permit: bool) { - let mut updated_validator_permit = Self::get_validator_permit(netuid); - if (uid as usize) < updated_validator_permit.len() { - updated_validator_permit[uid as usize] = validator_permit; - ValidatorPermit::::insert(netuid, updated_validator_permit); - } + let mut updated_validator_permits = Self::get_validator_permit(netuid); + let Some(updated_validator_permit) = updated_validator_permits.get_mut(uid as usize) else { + return; + }; + *updated_validator_permit = validator_permit; + ValidatorPermit::::insert(netuid, updated_validator_permits); } pub fn set_weights_min_stake(min_stake: u64) { WeightsMinStake::::put(min_stake); @@ -146,91 +153,47 @@ impl Pallet { } pub fn get_rank_for_uid(netuid: u16, uid: u16) -> u16 { let vec = Rank::::get(netuid); - if (uid as usize) < vec.len() { - return vec[uid as usize]; - } else { - return 0; - } + vec.get(uid as usize).copied().unwrap_or(0) } pub fn get_trust_for_uid(netuid: u16, uid: u16) -> u16 { let vec = Trust::::get(netuid); - if (uid as usize) < vec.len() { - return vec[uid as usize]; - } else { - return 0; - } + vec.get(uid as usize).copied().unwrap_or(0) } pub fn get_emission_for_uid(netuid: u16, uid: u16) -> u64 { let vec = Emission::::get(netuid); - if (uid as usize) < vec.len() { - return vec[uid as usize]; - } else { - return 0; - } + vec.get(uid as usize).copied().unwrap_or(0) } pub fn get_active_for_uid(netuid: u16, uid: u16) -> bool { let vec = Active::::get(netuid); - if (uid as usize) < vec.len() { - return vec[uid as usize]; - } else { - return false; - } + vec.get(uid as usize).copied().unwrap_or(false) } pub fn get_consensus_for_uid(netuid: u16, uid: u16) -> u16 { let vec = Consensus::::get(netuid); - if (uid as usize) < vec.len() { - return vec[uid as usize]; - } else { - return 0; - } + vec.get(uid as usize).copied().unwrap_or(0) } pub fn get_incentive_for_uid(netuid: u16, uid: u16) -> u16 { let vec = Incentive::::get(netuid); - if (uid as usize) < vec.len() { - return vec[uid as usize]; - } else { - return 0; - } + vec.get(uid as usize).copied().unwrap_or(0) } pub fn get_dividends_for_uid(netuid: u16, uid: u16) -> u16 { let vec = Dividends::::get(netuid); - if (uid as usize) < vec.len() { - return vec[uid as usize]; - } else { - return 0; - } + vec.get(uid as usize).copied().unwrap_or(0) } pub fn get_last_update_for_uid(netuid: u16, uid: u16) -> u64 { let vec = LastUpdate::::get(netuid); - if (uid as usize) < vec.len() { - return vec[uid as usize]; - } else { - return 0; - } + vec.get(uid as usize).copied().unwrap_or(0) } pub fn get_pruning_score_for_uid(netuid: u16, uid: u16) -> u16 { let vec = PruningScores::::get(netuid); - if (uid as usize) < vec.len() { - return vec[uid as usize]; - } else { 
- return u16::MAX; - } + vec.get(uid as usize).copied().unwrap_or(u16::MAX) } pub fn get_validator_trust_for_uid(netuid: u16, uid: u16) -> u16 { let vec = ValidatorTrust::::get(netuid); - if (uid as usize) < vec.len() { - return vec[uid as usize]; - } else { - return 0; - } + vec.get(uid as usize).copied().unwrap_or(0) } pub fn get_validator_permit_for_uid(netuid: u16, uid: u16) -> bool { let vec = ValidatorPermit::::get(netuid); - if (uid as usize) < vec.len() { - return vec[uid as usize]; - } else { - return false; - } + vec.get(uid as usize).copied().unwrap_or(false) } pub fn get_weights_min_stake() -> u64 { WeightsMinStake::::get() diff --git a/pallets/subtensor/src/weights.rs b/pallets/subtensor/src/weights.rs index 1bc96fee7..c60256df4 100644 --- a/pallets/subtensor/src/weights.rs +++ b/pallets/subtensor/src/weights.rs @@ -223,7 +223,7 @@ impl Pallet { if last_set_weights == 0 { return true; } // (Storage default) Never set weights. - return current_block - last_set_weights >= Self::get_weights_set_rate_limit(netuid); + return (current_block - last_set_weights) >= Self::get_weights_set_rate_limit(netuid); } // --- 3. Non registered peers cant pass. return false; @@ -335,7 +335,8 @@ impl Pallet { if weights.len() != 1 { return false; } - if uid != uids[0] { + let Some(first_uid) = uids.first() else { return false; }; + if uid != *first_uid { return false; } return true; From 261b9802c0883946f344b91139f2112b4ff6f296 Mon Sep 17 00:00:00 2001 From: Keith Date: Fri, 26 Apr 2024 23:18:50 +0800 Subject: [PATCH 02/10] Remove direct indexing in registry pallet --- pallets/registry/src/types.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/registry/src/types.rs b/pallets/registry/src/types.rs index 9e23b5629..0573392cd 100644 --- a/pallets/registry/src/types.rs +++ b/pallets/registry/src/types.rs @@ -88,8 +88,8 @@ impl Encode for Data { Data::None => vec![0u8; 1], Data::Raw(ref x) => { let l = x.len().min(64); - let mut r = vec![l as u8 + 1; l + 1]; - r[1..].copy_from_slice(&x[..l]); + let mut r = vec![l as u8 + 1]; + r.extend_from_slice(&x[..]); r } Data::BlakeTwo256(ref h) => once(66u8).chain(h.iter().cloned()).collect(), From 4c7c0a5a810ec84527907a88df945db6813c3964 Mon Sep 17 00:00:00 2001 From: Keith Date: Mon, 29 Apr 2024 13:04:48 +0800 Subject: [PATCH 03/10] Remove unwraps in most of the codebase --- node/src/chain_spec.rs | 15 ++-- pallets/admin-utils/src/lib.rs | 1 - pallets/subtensor/src/block_step.rs | 22 +++--- pallets/subtensor/src/delegate_info.rs | 12 ++-- pallets/subtensor/src/lib.rs | 14 ++-- pallets/subtensor/src/migration.rs | 2 +- pallets/subtensor/src/registration.rs | 13 ++-- pallets/subtensor/src/root.rs | 20 ++---- pallets/subtensor/src/serving.rs | 9 ++- pallets/subtensor/src/stake_info.rs | 20 ++++-- pallets/subtensor/src/staking.rs | 95 +++++++++----------------- pallets/subtensor/src/uids.rs | 6 +- pallets/subtensor/src/weights.rs | 12 +--- pallets/subtensor/tests/block_step.rs | 40 ++++++----- 14 files changed, 112 insertions(+), 169 deletions(-) diff --git a/node/src/chain_spec.rs b/node/src/chain_spec.rs index 69bc8e9c0..015cad940 100644 --- a/node/src/chain_spec.rs +++ b/node/src/chain_spec.rs @@ -1,3 +1,6 @@ +// Allowed since it's actually better to panic during chain setup when there is an error +#![allow(clippy::unwrap_used)] + use node_subtensor_runtime::{ AccountId, AuraConfig, BalancesConfig, GrandpaConfig, RuntimeGenesisConfig, SenateMembersConfig, Signature, SubtensorModuleConfig, SudoConfig, SystemConfig, @@ -90,14 +93,14 
@@ pub fn finney_mainnet_config() -> Result { Vec<(sp_runtime::AccountId32, (u64, u16))>, )> = Vec::new(); for (coldkey_str, hotkeys) in old_state.stakes.iter() { - let coldkey = ::from_ss58check(coldkey_str).unwrap(); + let coldkey = ::from_ss58check(coldkey_str).map_err(|e| e.to_string())?; let coldkey_account = sp_runtime::AccountId32::from(coldkey); let mut processed_hotkeys: Vec<(sp_runtime::AccountId32, (u64, u16))> = Vec::new(); for (hotkey_str, amount_uid) in hotkeys.iter() { let (amount, uid) = amount_uid; - let hotkey = ::from_ss58check(hotkey_str).unwrap(); + let hotkey = ::from_ss58check(hotkey_str).map_err(|e| e.to_string())?; let hotkey_account = sp_runtime::AccountId32::from(hotkey); processed_hotkeys.push((hotkey_account, (*amount, *uid))); @@ -109,7 +112,7 @@ pub fn finney_mainnet_config() -> Result { let mut balances_issuance: u64 = 0; let mut processed_balances: Vec<(sp_runtime::AccountId32, u64)> = Vec::new(); for (key_str, amount) in old_state.balances.iter() { - let key = ::from_ss58check(key_str).unwrap(); + let key = ::from_ss58check(key_str).map_err(|e| e.to_string())?; let key_account = sp_runtime::AccountId32::from(key); processed_balances.push((key_account, *amount)); @@ -266,14 +269,14 @@ pub fn finney_testnet_config() -> Result { Vec<(sp_runtime::AccountId32, (u64, u16))>, )> = Vec::new(); for (coldkey_str, hotkeys) in old_state.stakes.iter() { - let coldkey = ::from_ss58check(coldkey_str).unwrap(); + let coldkey = ::from_ss58check(coldkey_str).map_err(|e| e.to_string())?; let coldkey_account = sp_runtime::AccountId32::from(coldkey); let mut processed_hotkeys: Vec<(sp_runtime::AccountId32, (u64, u16))> = Vec::new(); for (hotkey_str, amount_uid) in hotkeys.iter() { let (amount, uid) = amount_uid; - let hotkey = ::from_ss58check(hotkey_str).unwrap(); + let hotkey = ::from_ss58check(hotkey_str).map_err(|e| e.to_string())?; let hotkey_account = sp_runtime::AccountId32::from(hotkey); processed_hotkeys.push((hotkey_account, (*amount, *uid))); @@ -285,7 +288,7 @@ pub fn finney_testnet_config() -> Result { let mut balances_issuance: u64 = 0; let mut processed_balances: Vec<(sp_runtime::AccountId32, u64)> = Vec::new(); for (key_str, amount) in old_state.balances.iter() { - let key = ::from_ss58check(key_str).unwrap(); + let key = ::from_ss58check(key_str).map_err(|e| e.to_string())?; let key_account = sp_runtime::AccountId32::from(key); processed_balances.push((key_account, *amount)); diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 513304666..ca66bef3b 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -836,7 +836,6 @@ pub trait SubtensorInterface { hotkey: &AccountId, increment: u64, ); - fn u64_to_balance(input: u64) -> Option; fn add_balance_to_coldkey_account(coldkey: &AccountId, amount: Balance); fn get_current_block_as_u64() -> u64; fn get_subnetwork_n(netuid: u16) -> u16; diff --git a/pallets/subtensor/src/block_step.rs b/pallets/subtensor/src/block_step.rs index 168c2879e..535e06c8f 100644 --- a/pallets/subtensor/src/block_step.rs +++ b/pallets/subtensor/src/block_step.rs @@ -74,11 +74,8 @@ impl Pallet { } } - pub fn has_loaded_emission_tuples(netuid: u16) -> bool { - LoadedEmission::::contains_key(netuid) - } - pub fn get_loaded_emission_tuples(netuid: u16) -> Vec<(T::AccountId, u64, u64)> { - LoadedEmission::::get(netuid).unwrap() + pub fn get_loaded_emission_tuples(netuid: u16) -> Option> { + LoadedEmission::::get(netuid) } // Reads from the loaded emission storage which contains lists of 
pending emission tuples ( hotkey, amount ) @@ -87,11 +84,10 @@ impl Pallet { pub fn drain_emission(_: u64) { // --- 1. We iterate across each network. for (netuid, _) in as IterableStorageMap>::iter() { - if !Self::has_loaded_emission_tuples(netuid) { - continue; - } // There are no tuples to emit. - let tuples_to_drain: Vec<(T::AccountId, u64, u64)> = - Self::get_loaded_emission_tuples(netuid); + let Some(tuples_to_drain) = Self::get_loaded_emission_tuples(netuid) else { + // There are no tuples to emit. + continue + }; let mut total_emitted: u64 = 0; for (hotkey, server_amount, validator_amount) in tuples_to_drain.iter() { Self::emit_inflation_through_hotkey_account( @@ -139,7 +135,7 @@ impl Pallet { Self::add_balance_to_coldkey_account( &Self::get_subnet_owner(netuid), - Self::u64_to_balance(cut.to_num::()).unwrap(), + cut.to_num::(), ); // We are creating tokens here from the coinbase. @@ -189,10 +185,8 @@ impl Pallet { // --- 10. Sink the emission tuples onto the already loaded. let mut concat_emission_tuples: Vec<(T::AccountId, u64, u64)> = emission_tuples_this_block.clone(); - if Self::has_loaded_emission_tuples(netuid) { + if let Some(mut current_emission_tuples) = Self::get_loaded_emission_tuples(netuid) { // 10.a We already have loaded emission tuples, so we concat the new ones. - let mut current_emission_tuples: Vec<(T::AccountId, u64, u64)> = - Self::get_loaded_emission_tuples(netuid); concat_emission_tuples.append(&mut current_emission_tuples); } LoadedEmission::::insert(netuid, concat_emission_tuples); diff --git a/pallets/subtensor/src/delegate_info.rs b/pallets/subtensor/src/delegate_info.rs index 03fe2bf34..e8a3053fd 100644 --- a/pallets/subtensor/src/delegate_info.rs +++ b/pallets/subtensor/src/delegate_info.rs @@ -87,7 +87,7 @@ impl Pallet { } let delegate: AccountIdOf = - T::AccountId::decode(&mut delegate_account_vec.as_bytes_ref()).unwrap(); + T::AccountId::decode(&mut delegate_account_vec.as_bytes_ref()).ok()?; // Check delegate exists if !>::contains_key(delegate.clone()) { return None; @@ -110,12 +110,10 @@ impl Pallet { } pub fn get_delegated(delegatee_account_vec: Vec) -> Vec<(DelegateInfo, Compact)> { - if delegatee_account_vec.len() != 32 { - return Vec::new(); // No delegates for invalid account - } - - let delegatee: AccountIdOf = - T::AccountId::decode(&mut delegatee_account_vec.as_bytes_ref()).unwrap(); + let Ok(delegatee) = + T::AccountId::decode(&mut delegatee_account_vec.as_bytes_ref()) else { + return Vec::new(); // No delegates for invalid account + }; let mut delegates: Vec<(DelegateInfo, Compact)> = Vec::new(); for delegate in diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 9444e9fe7..23fa4bf80 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -231,7 +231,8 @@ pub mod pallet { } #[pallet::type_value] pub fn DefaultAccount() -> T::AccountId { - T::AccountId::decode(&mut TrailingZeroInput::zeroes()).unwrap() + T::AccountId::decode(&mut TrailingZeroInput::zeroes()) + .expect("trailing zeroes always produce a valid account ID; qed") } #[pallet::type_value] pub fn DefaultTargetStakesPerInterval() -> u64 { @@ -513,7 +514,8 @@ pub mod pallet { } #[pallet::type_value] pub fn DefaultSubnetOwner() -> T::AccountId { - T::AccountId::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap() + T::AccountId::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()) + .expect("trailing zeroes always produce a valid account ID; qed") } #[pallet::type_value] pub fn DefaultSubnetLocked() -> u64 
{ @@ -784,7 +786,8 @@ pub mod pallet { } #[pallet::type_value] pub fn DefaultKey() -> T::AccountId { - T::AccountId::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap() + T::AccountId::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()) + .expect("trailing zeroes always produce a valid account ID; qed") } #[pallet::storage] // --- DMAP ( netuid, hotkey ) --> uid @@ -1719,9 +1722,8 @@ pub mod pallet { impl Pallet { // --- Returns the transaction priority for setting weights. pub fn get_priority_set_weights(hotkey: &T::AccountId, netuid: u16) -> u64 { - if Uids::::contains_key(netuid, hotkey) { - let uid = Self::get_uid_for_net_and_hotkey(netuid, &hotkey.clone()).unwrap(); - let _stake = Self::get_total_stake_for_hotkey(&hotkey); + if let Ok(uid) = Self::get_uid_for_net_and_hotkey(netuid, hotkey) { + let _stake = Self::get_total_stake_for_hotkey(hotkey); let current_block_number: u64 = Self::get_current_block_as_u64(); let default_priority: u64 = current_block_number - Self::get_last_update_for_uid(netuid, uid); diff --git a/pallets/subtensor/src/migration.rs b/pallets/subtensor/src/migration.rs index 354b199d8..8d66eefa7 100644 --- a/pallets/subtensor/src/migration.rs +++ b/pallets/subtensor/src/migration.rs @@ -89,7 +89,7 @@ pub fn migrate_transfer_ownership_to_foundation(coldkey: [u8; 32]) -> // We have to decode this using a byte slice as we don't have crypto-std let coldkey_account: ::AccountId = - ::AccountId::decode(&mut &coldkey[..]).unwrap(); + ::AccountId::decode(&mut &coldkey[..]).expect("coldkey is 32-byte array; qed"); info!("Foundation coldkey: {:?}", coldkey_account); let current_block = Pallet::::get_current_block_as_u64(); diff --git a/pallets/subtensor/src/registration.rs b/pallets/subtensor/src/registration.rs index a403465ae..31443fa67 100644 --- a/pallets/subtensor/src/registration.rs +++ b/pallets/subtensor/src/registration.rs @@ -95,14 +95,13 @@ impl Pallet { // --- 7. Ensure the callers coldkey has enough stake to perform the transaction. let current_block_number: u64 = Self::get_current_block_as_u64(); let registration_cost_as_u64 = Self::get_burn_as_u64(netuid); - let registration_cost_as_balance = Self::u64_to_balance(registration_cost_as_u64).unwrap(); ensure!( - Self::can_remove_balance_from_coldkey_account(&coldkey, registration_cost_as_balance), + Self::can_remove_balance_from_coldkey_account(&coldkey, registration_cost_as_u64), Error::::NotEnoughBalanceToStake ); // --- 8. Ensure the remove operation from the coldkey is a success. - let actual_burn_amount = Self::remove_balance_from_coldkey_account(&coldkey, registration_cost_as_balance)?; + let actual_burn_amount = Self::remove_balance_from_coldkey_account(&coldkey, registration_cost_as_u64)?; // The burn occurs here. Self::burn_tokens(actual_burn_amount); @@ -396,8 +395,7 @@ impl Pallet { let balance_to_add: u64 = 100_000_000_000; Self::coinbase(100_000_000_000); // We are creating tokens here from the coinbase. - let balance_to_be_added_as_balance = Self::u64_to_balance(balance_to_add); - Self::add_balance_to_coldkey_account(&coldkey, balance_to_be_added_as_balance.unwrap()); + Self::add_balance_to_coldkey_account(&coldkey, balance_to_add); // --- 6. Deposit successful event. 
log::info!( @@ -627,12 +625,11 @@ impl Pallet { .saturating_accrue(T::DbWeight::get().reads((TotalNetworks::::get() + 1u16) as u64)); let swap_cost = 1_000_000_000u64; - let swap_cost_as_balance = Self::u64_to_balance(swap_cost).unwrap(); ensure!( - Self::can_remove_balance_from_coldkey_account(&coldkey, swap_cost_as_balance), + Self::can_remove_balance_from_coldkey_account(&coldkey, swap_cost), Error::::NotEnoughBalance ); - let actual_burn_amount = Self::remove_balance_from_coldkey_account(&coldkey, swap_cost_as_balance)?; + let actual_burn_amount = Self::remove_balance_from_coldkey_account(&coldkey, swap_cost)?; Self::burn_tokens(actual_burn_amount); Owner::::remove(old_hotkey); diff --git a/pallets/subtensor/src/root.rs b/pallets/subtensor/src/root.rs index 504833df7..cc168a8eb 100644 --- a/pallets/subtensor/src/root.rs +++ b/pallets/subtensor/src/root.rs @@ -508,7 +508,7 @@ impl Pallet { } subnetwork_uid = lowest_uid; let replaced_hotkey: T::AccountId = - Self::get_hotkey_for_net_and_uid(root_netuid, subnetwork_uid).unwrap(); + Self::get_hotkey_for_net_and_uid(root_netuid, subnetwork_uid)?; // --- 13.1.2 The new account has a higher stake than the one being replaced. ensure!( @@ -647,14 +647,8 @@ impl Pallet { // --- 2. Calculate and lock the required tokens. let lock_amount: u64 = Self::get_network_lock_cost(); - let lock_as_balance = Self::u64_to_balance(lock_amount); - log::debug!("network lock_amount: {:?}", lock_amount,); ensure!( - lock_as_balance.is_some(), - Error::::CouldNotConvertToBalance - ); - ensure!( - Self::can_remove_balance_from_coldkey_account(&coldkey, lock_as_balance.unwrap()), + Self::can_remove_balance_from_coldkey_account(&coldkey, lock_amount), Error::::NotEnoughBalanceToStake ); @@ -687,7 +681,7 @@ impl Pallet { }; // --- 5. Perform the lock operation. - let actual_lock_amount = Self::remove_balance_from_coldkey_account(&coldkey, lock_as_balance.unwrap())?; + let actual_lock_amount = Self::remove_balance_from_coldkey_account(&coldkey, lock_amount)?; Self::set_subnet_locked_balance(netuid_to_register, actual_lock_amount); Self::set_network_last_lock(actual_lock_amount); @@ -850,12 +844,6 @@ impl Pallet { let owner_coldkey = SubnetOwner::::get(netuid); let reserved_amount = Self::get_subnet_locked_balance(netuid); - // Ensure that we can convert this u64 to a balance. - let reserved_amount_as_bal = Self::u64_to_balance(reserved_amount); - if reserved_amount_as_bal.is_none() { - return; - } - // --- 2. Remove network count. SubnetworkN::::remove(netuid); @@ -925,7 +913,7 @@ impl Pallet { BurnRegistrationsThisInterval::::remove(netuid); // --- 12. Add the balance back to the owner. - Self::add_balance_to_coldkey_account(&owner_coldkey, reserved_amount_as_bal.unwrap()); + Self::add_balance_to_coldkey_account(&owner_coldkey, reserved_amount); Self::set_subnet_locked_balance(netuid, 0); SubnetOwner::::remove(netuid); } diff --git a/pallets/subtensor/src/serving.rs b/pallets/subtensor/src/serving.rs index 6008b1b10..84546bd19 100644 --- a/pallets/subtensor/src/serving.rs +++ b/pallets/subtensor/src/serving.rs @@ -1,5 +1,4 @@ use super::*; -use frame_support::sp_std::vec; impl Pallet { // ---- The implementation for the extrinsic serve_axon which sets the ip endpoint information for a uid on a network. 
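// Illustrative sketch of the fallback pattern applied to the getters in this file,
// using a hypothetical `AxonLikeInfo` type rather than the pallet's real structs:
// when storage yields an Option, a missing entry resolves to a zeroed default
// instead of panicking through `.unwrap()`.
#[derive(Default)]
struct AxonLikeInfo {
    block: u64,
    version: u32,
}

fn info_or_default(stored: Option<AxonLikeInfo>) -> AxonLikeInfo {
    // Compact variant of the `if let Some(..) { .. } else { <zeroed struct> }` blocks below.
    stored.unwrap_or_default()
}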
@@ -245,8 +244,8 @@ impl Pallet { } pub fn get_axon_info(netuid: u16, hotkey: &T::AccountId) -> AxonInfoOf { - if Self::has_axon_info(netuid, hotkey) { - Axons::::get(netuid, hotkey).unwrap() + if let Some(axons) = Axons::::get(netuid, hotkey) { + axons } else { AxonInfo { block: 0, @@ -262,8 +261,8 @@ impl Pallet { } pub fn get_prometheus_info(netuid: u16, hotkey: &T::AccountId) -> PrometheusInfoOf { - if Self::has_prometheus_info(netuid, hotkey) { - Prometheus::::get(netuid, hotkey).unwrap() + if let Some(prometheus) = Prometheus::::get(netuid, hotkey) { + prometheus } else { PrometheusInfo { block: 0, diff --git a/pallets/subtensor/src/stake_info.rs b/pallets/subtensor/src/stake_info.rs index 8e3939766..9d5059986 100644 --- a/pallets/subtensor/src/stake_info.rs +++ b/pallets/subtensor/src/stake_info.rs @@ -47,8 +47,10 @@ impl Pallet { if coldkey_account_vec.len() != 32 { continue; // Invalid coldkey } - let coldkey: AccountIdOf = - T::AccountId::decode(&mut coldkey_account_vec.as_bytes_ref()).unwrap(); + let Ok(coldkey) = + T::AccountId::decode(&mut coldkey_account_vec.as_bytes_ref()) else { + continue; + }; coldkeys.push(coldkey); } @@ -56,8 +58,6 @@ impl Pallet { return Vec::new(); // Invalid coldkey } - - Self::_get_stake_info_for_coldkeys(coldkeys) } @@ -66,14 +66,20 @@ impl Pallet { return Vec::new(); // Invalid coldkey } - let coldkey: AccountIdOf = - T::AccountId::decode(&mut coldkey_account_vec.as_bytes_ref()).unwrap(); + let Ok(coldkey) = + T::AccountId::decode(&mut coldkey_account_vec.as_bytes_ref()) else { + return Vec::new(); + }; let stake_info = Self::_get_stake_info_for_coldkeys(vec![coldkey]); if stake_info.is_empty() { Vec::new()// Invalid coldkey } else { - return stake_info.first().unwrap().1.clone(); + let Some(first) = stake_info.first() else { + return Vec::new(); + }; + + first.1.clone() } } } diff --git a/pallets/subtensor/src/staking.rs b/pallets/subtensor/src/staking.rs index 34f1f3c15..213108bea 100644 --- a/pallets/subtensor/src/staking.rs +++ b/pallets/subtensor/src/staking.rs @@ -141,32 +141,25 @@ impl Pallet { stake_to_be_added ); - // --- 2. We convert the stake u64 into a balancer. - let stake_as_balance = Self::u64_to_balance(stake_to_be_added); + // --- 2. Ensure the callers coldkey has enough stake to perform the transaction. ensure!( - stake_as_balance.is_some(), - Error::::CouldNotConvertToBalance - ); - - // --- 3. Ensure the callers coldkey has enough stake to perform the transaction. - ensure!( - Self::can_remove_balance_from_coldkey_account(&coldkey, stake_as_balance.unwrap()), + Self::can_remove_balance_from_coldkey_account(&coldkey, stake_to_be_added), Error::::NotEnoughBalanceToStake ); - // --- 4. Ensure that the hotkey account exists this is only possible through registration. + // --- 3. Ensure that the hotkey account exists this is only possible through registration. ensure!( Self::hotkey_account_exists(&hotkey), Error::::NotRegistered ); - // --- 5. Ensure that the hotkey allows delegation or that the hotkey is owned by the calling coldkey. + // --- 4. Ensure that the hotkey allows delegation or that the hotkey is owned by the calling coldkey. ensure!( Self::hotkey_is_delegate(&hotkey) || Self::coldkey_owns_hotkey(&coldkey, &hotkey), Error::::NonAssociatedColdKey ); - // --- 6. Ensure we don't exceed stake rate limit + // --- 5. 
Ensure we don't exceed stake rate limit let stakes_this_interval = Self::get_stakes_this_interval_for_coldkey_hotkey(&coldkey, &hotkey); ensure!( @@ -174,7 +167,7 @@ impl Pallet { Error::::StakeRateLimitExceeded ); - // --- 7. If this is a nomination stake, check if total stake after adding will be above + // --- 6. If this is a nomination stake, check if total stake after adding will be above // the minimum required stake. // If coldkey is not owner of the hotkey, it's a nomination stake. @@ -188,18 +181,18 @@ impl Pallet { ); } - // --- 8. Ensure the remove operation from the coldkey is a success. + // --- 7. Ensure the remove operation from the coldkey is a success. let actual_amount_to_stake = - Self::remove_balance_from_coldkey_account(&coldkey, stake_as_balance.unwrap())?; + Self::remove_balance_from_coldkey_account(&coldkey, stake_to_be_added)?; - // --- 9. If we reach here, add the balance to the hotkey. + // --- 8. If we reach here, add the balance to the hotkey. Self::increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, actual_amount_to_stake); // Set last block for rate limiting let block: u64 = Self::get_current_block_as_u64(); Self::set_last_tx_block(&coldkey, block); - // --- 10. Emit the staking event. + // --- 9. Emit the staking event. Self::set_stakes_this_interval_for_coldkey_hotkey( &coldkey, &hotkey, @@ -213,7 +206,7 @@ impl Pallet { ); Self::deposit_event(Event::StakeAdded(hotkey, actual_amount_to_stake)); - // --- 11. Ok and return. + // --- 10. Ok and return. Ok(()) } @@ -288,14 +281,7 @@ impl Pallet { Error::::NotEnoughStaketoWithdraw ); - // --- 5. Ensure that we can conver this u64 to a balance. - let stake_to_be_added_as_currency = Self::u64_to_balance(stake_to_be_removed); - ensure!( - stake_to_be_added_as_currency.is_some(), - Error::::CouldNotConvertToBalance - ); - - // --- 6. Ensure we don't exceed stake rate limit + // --- 5. Ensure we don't exceed stake rate limit let unstakes_this_interval = Self::get_stakes_this_interval_for_coldkey_hotkey(&coldkey, &hotkey); ensure!( @@ -303,7 +289,7 @@ impl Pallet { Error::::UnstakeRateLimitExceeded ); - // --- 7. If this is a nomination stake, check if total stake after removing will be above + // --- 6. If this is a nomination stake, check if total stake after removing will be above // the minimum required stake. // If coldkey is not owner of the hotkey, it's a nomination stake. @@ -317,17 +303,17 @@ impl Pallet { ); } - // --- 8. We remove the balance from the hotkey. + // --- 7. We remove the balance from the hotkey. Self::decrease_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake_to_be_removed); - // --- 9. We add the balancer to the coldkey. If the above fails we will not credit this coldkey. - Self::add_balance_to_coldkey_account(&coldkey, stake_to_be_added_as_currency.unwrap()); + // --- 8. We add the balancer to the coldkey. If the above fails we will not credit this coldkey. + Self::add_balance_to_coldkey_account(&coldkey, stake_to_be_removed); // Set last block for rate limiting let block: u64 = Self::get_current_block_as_u64(); Self::set_last_tx_block(&coldkey, block); - // --- 10. Emit the unstaking event. + // --- 9. Emit the unstaking event. Self::set_stakes_this_interval_for_coldkey_hotkey( &coldkey, &hotkey, @@ -341,7 +327,7 @@ impl Pallet { ); Self::deposit_event(Event::StakeRemoved(hotkey, stake_to_be_removed)); - // --- 11. Done and ok. + // --- 10. Done and ok. Ok(()) } @@ -566,9 +552,8 @@ impl Pallet { // Remove the stake from the nominator account. 
(this is a more forceful unstake operation which ) // Actually deletes the staking account. Self::empty_stake_on_coldkey_hotkey_account(&coldkey, &hotkey); - // Convert the removed stake back to balance and add it to the coldkey account. - let stake_as_balance = Self::u64_to_balance(stake); - Self::add_balance_to_coldkey_account(&coldkey, stake_as_balance.unwrap()); + // Add the stake to the coldkey account. + Self::add_balance_to_coldkey_account(&coldkey, stake); } } } @@ -584,14 +569,6 @@ impl Pallet { } } - pub fn u64_to_balance( - input: u64, - ) -> Option< - <::Currency as fungible::Inspect<::AccountId>>::Balance, - >{ - input.try_into().ok() - } - pub fn add_balance_to_coldkey_account( coldkey: &T::AccountId, amount: <::Currency as fungible::Inspect<::AccountId>>::Balance, @@ -671,26 +648,18 @@ impl Pallet { hotkey, ) { - // Convert to balance and add to the coldkey account. - let stake_i_as_balance = Self::u64_to_balance(stake_i); - if stake_i_as_balance.is_none() { - continue; // Don't unstake if we can't convert to balance. - } else { - // Stake is successfully converted to balance. - - // Remove the stake from the coldkey - hotkey pairing. - Self::decrease_stake_on_coldkey_hotkey_account( - &delegate_coldkey_i, - hotkey, - stake_i, - ); - - // Add the balance to the coldkey account. - Self::add_balance_to_coldkey_account( - &delegate_coldkey_i, - stake_i_as_balance.unwrap(), - ); - } + // Remove the stake from the coldkey - hotkey pairing. + Self::decrease_stake_on_coldkey_hotkey_account( + &delegate_coldkey_i, + hotkey, + stake_i, + ); + + // Add the balance to the coldkey account. + Self::add_balance_to_coldkey_account( + &delegate_coldkey_i, + stake_i, + ); } } } diff --git a/pallets/subtensor/src/uids.rs b/pallets/subtensor/src/uids.rs index 822b04540..1a1dcd1da 100644 --- a/pallets/subtensor/src/uids.rs +++ b/pallets/subtensor/src/uids.rs @@ -116,10 +116,8 @@ impl Pallet { // Returns the stake of the uid on network or 0 if it doesnt exist. // pub fn get_stake_for_uid_and_subnetwork(netuid: u16, neuron_uid: u16) -> u64 { - if Self::is_uid_exist_on_network(netuid, neuron_uid) { - Self::get_total_stake_for_hotkey( - &Self::get_hotkey_for_net_and_uid(netuid, neuron_uid).unwrap(), - ) + if let Ok(hotkey) = Self::get_hotkey_for_net_and_uid(netuid, neuron_uid) { + Self::get_total_stake_for_hotkey(&hotkey) } else { 0 } diff --git a/pallets/subtensor/src/weights.rs b/pallets/subtensor/src/weights.rs index 35cec87bc..c588fe4e6 100644 --- a/pallets/subtensor/src/weights.rs +++ b/pallets/subtensor/src/weights.rs @@ -121,17 +121,9 @@ impl Pallet { Error::::IncorrectNetworkVersionKey ); - // --- 9. Get the neuron uid of associated hotkey on network netuid. + // --- 8. Get the neuron uid of associated hotkey on network netuid. - let net_neuron_uid = Self::get_uid_for_net_and_hotkey(netuid, &hotkey); - ensure!( - net_neuron_uid.is_ok(), - net_neuron_uid - .err() - .unwrap_or(Error::::NotRegistered.into()) - ); - - let neuron_uid = net_neuron_uid.unwrap(); + let neuron_uid = Self::get_uid_for_net_and_hotkey(netuid, &hotkey)?; // --- 9. Ensure the uid is not setting weights faster than the weights_set_rate_limit. 
let current_block: u64 = Self::get_current_block_as_u64(); diff --git a/pallets/subtensor/tests/block_step.rs b/pallets/subtensor/tests/block_step.rs index ce32ad0e4..5d9cbdd7b 100644 --- a/pallets/subtensor/tests/block_step.rs +++ b/pallets/subtensor/tests/block_step.rs @@ -5,6 +5,7 @@ use mock::*; use sp_core::U256; #[test] +#[allow(clippy::unwrap_used)] fn test_loaded_emission() { new_test_ext(1).execute_with(|| { let n: u16 = 100; @@ -19,7 +20,7 @@ fn test_loaded_emission() { for i in 0..n { SubtensorModule::append_neuron(netuid, &U256::from(i), 0); } - assert!(!SubtensorModule::has_loaded_emission_tuples(netuid)); + assert!(SubtensorModule::get_loaded_emission_tuples(netuid).is_none()); // Try loading at block 0 let block: u64 = 0; @@ -28,7 +29,7 @@ fn test_loaded_emission() { 8 ); SubtensorModule::generate_emission(block); - assert!(!SubtensorModule::has_loaded_emission_tuples(netuid)); + assert!(SubtensorModule::get_loaded_emission_tuples(netuid).is_none()); // Try loading at block = 9; let block: u64 = 8; @@ -37,9 +38,11 @@ fn test_loaded_emission() { 0 ); SubtensorModule::generate_emission(block); - assert!(SubtensorModule::has_loaded_emission_tuples(netuid)); + assert!(SubtensorModule::get_loaded_emission_tuples(netuid).is_some()); assert_eq!( - SubtensorModule::get_loaded_emission_tuples(netuid).len(), + SubtensorModule::get_loaded_emission_tuples(netuid) + .unwrap() + .len(), n as usize ); @@ -47,37 +50,32 @@ fn test_loaded_emission() { // None remaining because we are at epoch. let block: u64 = 8; SubtensorModule::drain_emission(block); - assert!(!SubtensorModule::has_loaded_emission_tuples(netuid)); + assert!(SubtensorModule::get_loaded_emission_tuples(netuid).is_none()); // Generate more emission. SubtensorModule::generate_emission(8); assert_eq!( - SubtensorModule::get_loaded_emission_tuples(netuid).len(), + SubtensorModule::get_loaded_emission_tuples(netuid) + .unwrap() + .len(), n as usize ); for block in 9..19 { let mut n_remaining: usize = 0; let mut n_to_drain: usize = 0; - if SubtensorModule::has_loaded_emission_tuples(netuid) { - n_remaining = SubtensorModule::get_loaded_emission_tuples(netuid).len(); - n_to_drain = SubtensorModule::tuples_to_drain_this_block( - netuid, - tempo, - block, - SubtensorModule::get_loaded_emission_tuples(netuid).len(), - ); + if let Some(tuples) = SubtensorModule::get_loaded_emission_tuples(netuid) { + n_remaining = tuples.len(); + n_to_drain = + SubtensorModule::tuples_to_drain_this_block(netuid, tempo, block, tuples.len()); } SubtensorModule::drain_emission(block); // drain it with 9 more blocks to go - if SubtensorModule::has_loaded_emission_tuples(netuid) { - assert_eq!( - SubtensorModule::get_loaded_emission_tuples(netuid).len(), - n_remaining - n_to_drain - ); + if let Some(tuples) = SubtensorModule::get_loaded_emission_tuples(netuid) { + assert_eq!(tuples.len(), n_remaining - n_to_drain); } - log::info!("n_to_drain:{:?}", n_to_drain.clone()); + log::info!("n_to_drain: {:?}", n_to_drain); log::info!( - "SubtensorModule::get_loaded_emission_tuples( netuid ).len():{:?}", + "SubtensorModule::get_loaded_emission_tuples( netuid ).len(): {:?}", n_remaining - n_to_drain ); } From 82aef268b3a533f28cbe61b0b8db6c6b290de066 Mon Sep 17 00:00:00 2001 From: Keith Date: Mon, 29 Apr 2024 13:07:13 +0800 Subject: [PATCH 04/10] Change registry pallet to use fungible traits --- pallets/registry/src/lib.rs | 44 +++++++++++++++++++++++++++++-------- runtime/src/lib.rs | 5 +---- 2 files changed, 36 insertions(+), 13 deletions(-) diff --git 
a/pallets/registry/src/lib.rs b/pallets/registry/src/lib.rs index b23dceb42..5341f3f8c 100644 --- a/pallets/registry/src/lib.rs +++ b/pallets/registry/src/lib.rs @@ -12,17 +12,20 @@ pub use pallet::*; pub use types::*; pub use weights::WeightInfo; -use frame_support::traits::Currency; +use frame_support::traits::tokens::{ + fungible::{self, MutateHold as _}, + Precision, +}; use sp_runtime::traits::Zero; use sp_std::boxed::Box; type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as fungible::Inspect<::AccountId>>::Balance; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{pallet_prelude::*, traits::ReservableCurrency}; + use frame_support::{pallet_prelude::*, traits::tokens::fungible}; use frame_system::pallet_prelude::*; #[pallet::pallet] @@ -36,7 +39,7 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; // Currency type that will be used to place deposits on neurons - type Currency: ReservableCurrency + Send + Sync; + type Currency: fungible::MutateHold; // Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; @@ -56,6 +59,9 @@ pub mod pallet { /// The amount held on deposit per additional field for a registered identity. #[pallet::constant] type FieldDeposit: Get>; + + /// Reasons for putting funds on hold. + type RuntimeHoldReason: From; } #[pallet::event] @@ -75,6 +81,11 @@ pub mod pallet { NotRegistered, } + #[pallet::composite_enum] + pub enum HoldReason { + RegistryIdentity, + } + /// Identity data by account #[pallet::storage] #[pallet::getter(fn identity_of)] @@ -125,11 +136,21 @@ pub mod pallet { let old_deposit = id.deposit; id.deposit = T::InitialDeposit::get() + fd; if id.deposit > old_deposit { - T::Currency::reserve(&who, id.deposit - old_deposit)?; + T::Currency::hold( + &HoldReason::RegistryIdentity.into(), + &who, + id.deposit - old_deposit, + )?; } if old_deposit > id.deposit { - let err_amount = T::Currency::unreserve(&who, old_deposit - id.deposit); - debug_assert!(err_amount.is_zero()); + let release_res = T::Currency::release( + &HoldReason::RegistryIdentity.into(), + &who, + old_deposit - id.deposit, + Precision::BestEffort, + ); + debug_assert!(release_res + .is_ok_and(|released_amount| released_amount == (old_deposit - id.deposit))); } >::insert(&identified, id); @@ -153,8 +174,13 @@ pub mod pallet { let id = >::take(&identified).ok_or(Error::::NotRegistered)?; let deposit = id.total_deposit(); - let err_amount = T::Currency::unreserve(&who, deposit); - debug_assert!(err_amount.is_zero()); + let release_res = T::Currency::release( + &HoldReason::RegistryIdentity.into(), + &who, + deposit, + Precision::BestEffort, + ); + debug_assert!(release_res.is_ok_and(|released_amount| released_amount == deposit)); Self::deposit_event(Event::IdentityDissolved { who: identified }); diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index c2ea64da1..0d41e7044 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -713,6 +713,7 @@ parameter_types! 
{ impl pallet_registry::Config for Runtime { type RuntimeEvent = RuntimeEvent; + type RuntimeHoldReason = RuntimeHoldReason; type Currency = Balances; type CanRegister = AllowIdentityReg; type WeightInfo = pallet_registry::weights::SubstrateWeight; @@ -941,10 +942,6 @@ impl SubtensorModule::increase_stake_on_coldkey_hotkey_account(coldkey, hotkey, increment); } - fn u64_to_balance(input: u64) -> Option { - SubtensorModule::u64_to_balance(input) - } - fn add_balance_to_coldkey_account(coldkey: &AccountId, amount: Balance) { SubtensorModule::add_balance_to_coldkey_account(coldkey, amount); } From 2ecdb7638ff7ba6a55ae97116d5b2c354134cc66 Mon Sep 17 00:00:00 2001 From: Keith Date: Mon, 29 Apr 2024 13:10:12 +0800 Subject: [PATCH 05/10] Fix compilation error --- pallets/admin-utils/tests/mock.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pallets/admin-utils/tests/mock.rs b/pallets/admin-utils/tests/mock.rs index de5efce03..abf663678 100644 --- a/pallets/admin-utils/tests/mock.rs +++ b/pallets/admin-utils/tests/mock.rs @@ -279,10 +279,6 @@ impl pallet_admin_utils::SubtensorInterface f SubtensorModule::increase_stake_on_coldkey_hotkey_account(coldkey, hotkey, increment); } - fn u64_to_balance(input: u64) -> Option { - SubtensorModule::u64_to_balance(input) - } - fn add_balance_to_coldkey_account(coldkey: &AccountId, amount: Balance) { SubtensorModule::add_balance_to_coldkey_account(coldkey, amount); } From 9032f95662e9fe4dcd539e1b8c02b40ca77957a8 Mon Sep 17 00:00:00 2001 From: Keith Date: Tue, 30 Apr 2024 01:54:52 +0800 Subject: [PATCH 06/10] cargo fmt --- node/src/chain_spec.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/node/src/chain_spec.rs b/node/src/chain_spec.rs index 75d8497ab..6dbb68b02 100644 --- a/node/src/chain_spec.rs +++ b/node/src/chain_spec.rs @@ -93,14 +93,16 @@ pub fn finney_mainnet_config() -> Result { Vec<(sp_runtime::AccountId32, (u64, u16))>, )> = Vec::new(); for (coldkey_str, hotkeys) in old_state.stakes.iter() { - let coldkey = ::from_ss58check(coldkey_str).map_err(|e| e.to_string())?; + let coldkey = ::from_ss58check(coldkey_str) + .map_err(|e| e.to_string())?; let coldkey_account = sp_runtime::AccountId32::from(coldkey); let mut processed_hotkeys: Vec<(sp_runtime::AccountId32, (u64, u16))> = Vec::new(); for (hotkey_str, amount_uid) in hotkeys.iter() { let (amount, uid) = amount_uid; - let hotkey = ::from_ss58check(hotkey_str).map_err(|e| e.to_string())?; + let hotkey = ::from_ss58check(hotkey_str) + .map_err(|e| e.to_string())?; let hotkey_account = sp_runtime::AccountId32::from(hotkey); processed_hotkeys.push((hotkey_account, (*amount, *uid))); @@ -112,7 +114,8 @@ pub fn finney_mainnet_config() -> Result { let mut balances_issuance: u64 = 0; let mut processed_balances: Vec<(sp_runtime::AccountId32, u64)> = Vec::new(); for (key_str, amount) in old_state.balances.iter() { - let key = ::from_ss58check(key_str).map_err(|e| e.to_string())?; + let key = + ::from_ss58check(key_str).map_err(|e| e.to_string())?; let key_account = sp_runtime::AccountId32::from(key); processed_balances.push((key_account, *amount)); @@ -269,14 +272,16 @@ pub fn finney_testnet_config() -> Result { Vec<(sp_runtime::AccountId32, (u64, u16))>, )> = Vec::new(); for (coldkey_str, hotkeys) in old_state.stakes.iter() { - let coldkey = ::from_ss58check(coldkey_str).map_err(|e| e.to_string())?; + let coldkey = ::from_ss58check(coldkey_str) + .map_err(|e| e.to_string())?; let coldkey_account = sp_runtime::AccountId32::from(coldkey); let mut processed_hotkeys: 
Vec<(sp_runtime::AccountId32, (u64, u16))> = Vec::new(); for (hotkey_str, amount_uid) in hotkeys.iter() { let (amount, uid) = amount_uid; - let hotkey = ::from_ss58check(hotkey_str).map_err(|e| e.to_string())?; + let hotkey = ::from_ss58check(hotkey_str) + .map_err(|e| e.to_string())?; let hotkey_account = sp_runtime::AccountId32::from(hotkey); processed_hotkeys.push((hotkey_account, (*amount, *uid))); @@ -288,7 +293,8 @@ pub fn finney_testnet_config() -> Result { let mut balances_issuance: u64 = 0; let mut processed_balances: Vec<(sp_runtime::AccountId32, u64)> = Vec::new(); for (key_str, amount) in old_state.balances.iter() { - let key = ::from_ss58check(key_str).map_err(|e| e.to_string())?; + let key = + ::from_ss58check(key_str).map_err(|e| e.to_string())?; let key_account = sp_runtime::AccountId32::from(key); processed_balances.push((key_account, *amount)); From 16bbbdbdb3f1c492e5892ce0435dfdb9cb34eb7a Mon Sep 17 00:00:00 2001 From: Keith Date: Tue, 30 Apr 2024 02:07:07 +0800 Subject: [PATCH 07/10] Use set_balance instead of make_free_balance_be --- pallets/registry/src/benchmarking.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/registry/src/benchmarking.rs b/pallets/registry/src/benchmarking.rs index fe3866dda..4a908cd20 100644 --- a/pallets/registry/src/benchmarking.rs +++ b/pallets/registry/src/benchmarking.rs @@ -40,7 +40,7 @@ mod benchmarks { fn set_identity() { // The target user let caller: T::AccountId = whitelisted_caller(); - let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let _ = T::Currency::set_balance(&caller, BalanceOf::::max_value()); #[extrinsic_call] _( @@ -56,7 +56,7 @@ mod benchmarks { fn clear_identity() { // The target user let caller: T::AccountId = whitelisted_caller(); - let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let _ = T::Currency::set_balance(&caller, BalanceOf::::max_value()); let vali_account = account::("account", 0, 0u32); From fdc4732e3f255ce664fbf0267a8ede0f18ca4ba1 Mon Sep 17 00:00:00 2001 From: Keith Date: Tue, 30 Apr 2024 02:08:24 +0800 Subject: [PATCH 08/10] Commit missing merged files --- pallets/subtensor/src/root.rs | 3 -- pallets/subtensor/src/staking.rs | 77 +------------------------------- 2 files changed, 1 insertion(+), 79 deletions(-) diff --git a/pallets/subtensor/src/root.rs b/pallets/subtensor/src/root.rs index d68eb9b7b..f718f84de 100644 --- a/pallets/subtensor/src/root.rs +++ b/pallets/subtensor/src/root.rs @@ -643,10 +643,7 @@ impl Pallet { // --- 2. Calculate and lock the required tokens. let lock_amount: u64 = Self::get_network_lock_cost(); -<<<<<<< HEAD -======= log::debug!("network lock_amount: {:?}", lock_amount); ->>>>>>> origin/development ensure!( Self::can_remove_balance_from_coldkey_account(&coldkey, lock_amount), Error::::NotEnoughBalanceToStake diff --git a/pallets/subtensor/src/staking.rs b/pallets/subtensor/src/staking.rs index 2d2410db3..9a0005b6c 100644 --- a/pallets/subtensor/src/staking.rs +++ b/pallets/subtensor/src/staking.rs @@ -141,41 +141,25 @@ impl Pallet { stake_to_be_added ); -<<<<<<< HEAD - // --- 2. Ensure the callers coldkey has enough stake to perform the transaction. -======= // Ensure the callers coldkey has enough stake to perform the transaction. ->>>>>>> origin/development ensure!( Self::can_remove_balance_from_coldkey_account(&coldkey, stake_to_be_added), Error::::NotEnoughBalanceToStake ); -<<<<<<< HEAD - // --- 3. Ensure that the hotkey account exists this is only possible through registration. 
-======= // Ensure that the hotkey account exists this is only possible through registration. ->>>>>>> origin/development ensure!( Self::hotkey_account_exists(&hotkey), Error::::NotRegistered ); -<<<<<<< HEAD - // --- 4. Ensure that the hotkey allows delegation or that the hotkey is owned by the calling coldkey. -======= // Ensure that the hotkey allows delegation or that the hotkey is owned by the calling coldkey. ->>>>>>> origin/development ensure!( Self::hotkey_is_delegate(&hotkey) || Self::coldkey_owns_hotkey(&coldkey, &hotkey), Error::::NonAssociatedColdKey ); -<<<<<<< HEAD - // --- 5. Ensure we don't exceed stake rate limit -======= // Ensure we don't exceed stake rate limit ->>>>>>> origin/development let stakes_this_interval = Self::get_stakes_this_interval_for_coldkey_hotkey(&coldkey, &hotkey); ensure!( @@ -183,11 +167,7 @@ impl Pallet { Error::::StakeRateLimitExceeded ); -<<<<<<< HEAD - // --- 6. If this is a nomination stake, check if total stake after adding will be above -======= // If this is a nomination stake, check if total stake after adding will be above ->>>>>>> origin/development // the minimum required stake. // If coldkey is not owner of the hotkey, it's a nomination stake. @@ -201,30 +181,18 @@ impl Pallet { ); } -<<<<<<< HEAD - // --- 7. Ensure the remove operation from the coldkey is a success. - let actual_amount_to_stake = - Self::remove_balance_from_coldkey_account(&coldkey, stake_to_be_added)?; - - // --- 8. If we reach here, add the balance to the hotkey. -======= // Ensure the remove operation from the coldkey is a success. let actual_amount_to_stake = Self::remove_balance_from_coldkey_account(&coldkey, stake_to_be_added)?; // If we reach here, add the balance to the hotkey. ->>>>>>> origin/development Self::increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, actual_amount_to_stake); // Set last block for rate limiting let block: u64 = Self::get_current_block_as_u64(); Self::set_last_tx_block(&coldkey, block); -<<<<<<< HEAD - // --- 9. Emit the staking event. -======= // Emit the staking event. ->>>>>>> origin/development Self::set_stakes_this_interval_for_coldkey_hotkey( &coldkey, &hotkey, @@ -238,11 +206,7 @@ impl Pallet { ); Self::deposit_event(Event::StakeAdded(hotkey, actual_amount_to_stake)); -<<<<<<< HEAD - // --- 10. Ok and return. -======= // Ok and return. ->>>>>>> origin/development Ok(()) } @@ -317,11 +281,7 @@ impl Pallet { Error::::NotEnoughStaketoWithdraw ); -<<<<<<< HEAD - // --- 5. Ensure we don't exceed stake rate limit -======= // Ensure we don't exceed stake rate limit ->>>>>>> origin/development let unstakes_this_interval = Self::get_stakes_this_interval_for_coldkey_hotkey(&coldkey, &hotkey); ensure!( @@ -329,11 +289,7 @@ impl Pallet { Error::::UnstakeRateLimitExceeded ); -<<<<<<< HEAD - // --- 6. If this is a nomination stake, check if total stake after removing will be above -======= // If this is a nomination stake, check if total stake after removing will be above ->>>>>>> origin/development // the minimum required stake. // If coldkey is not owner of the hotkey, it's a nomination stake. @@ -347,28 +303,17 @@ impl Pallet { ); } -<<<<<<< HEAD - // --- 7. We remove the balance from the hotkey. - Self::decrease_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake_to_be_removed); - - // --- 8. We add the balancer to the coldkey. If the above fails we will not credit this coldkey. -======= // We remove the balance from the hotkey. 
Self::decrease_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake_to_be_removed); // We add the balancer to the coldkey. If the above fails we will not credit this coldkey. ->>>>>>> origin/development Self::add_balance_to_coldkey_account(&coldkey, stake_to_be_removed); // Set last block for rate limiting let block: u64 = Self::get_current_block_as_u64(); Self::set_last_tx_block(&coldkey, block); -<<<<<<< HEAD - // --- 9. Emit the unstaking event. -======= // Emit the unstaking event. ->>>>>>> origin/development Self::set_stakes_this_interval_for_coldkey_hotkey( &coldkey, &hotkey, @@ -382,11 +327,7 @@ impl Pallet { ); Self::deposit_event(Event::StakeRemoved(hotkey, stake_to_be_removed)); -<<<<<<< HEAD - // --- 10. Done and ok. -======= // Done and ok. ->>>>>>> origin/development Ok(()) } @@ -610,15 +551,9 @@ impl Pallet { if stake < Self::get_nominator_min_required_stake() { // Remove the stake from the nominator account. (this is a more forceful unstake operation which ) // Actually deletes the staking account. -<<<<<<< HEAD - Self::empty_stake_on_coldkey_hotkey_account(&coldkey, &hotkey); - // Add the stake to the coldkey account. - Self::add_balance_to_coldkey_account(&coldkey, stake); -======= Self::empty_stake_on_coldkey_hotkey_account(coldkey, hotkey); - // Convert the removed stake back to balance and add it to the coldkey account. + // Add the stake to the coldkey account. Self::add_balance_to_coldkey_account(coldkey, stake); ->>>>>>> origin/development } } } @@ -705,7 +640,6 @@ impl Pallet { hotkey, ) { -<<<<<<< HEAD // Remove the stake from the coldkey - hotkey pairing. Self::decrease_stake_on_coldkey_hotkey_account( &delegate_coldkey_i, @@ -718,15 +652,6 @@ impl Pallet { &delegate_coldkey_i, stake_i, ); -======= - // Stake is successfully converted to balance. - - // Remove the stake from the coldkey - hotkey pairing. - Self::decrease_stake_on_coldkey_hotkey_account(&delegate_coldkey_i, hotkey, stake_i); - - // Add the balance to the coldkey account. - Self::add_balance_to_coldkey_account(&delegate_coldkey_i, stake_i); ->>>>>>> origin/development } } } From d509dd8a8e7927e52935a45448d382361bc53292 Mon Sep 17 00:00:00 2001 From: Keith Date: Tue, 30 Apr 2024 13:41:55 +0800 Subject: [PATCH 09/10] cargo fmt --- pallets/subtensor/src/block_step.rs | 2 +- pallets/subtensor/src/delegate_info.rs | 7 +- pallets/subtensor/src/epoch.rs | 91 ++++--- pallets/subtensor/src/math.rs | 324 +++++++++++++++---------- pallets/subtensor/src/migration.rs | 3 +- pallets/subtensor/src/root.rs | 10 +- pallets/subtensor/src/stake_info.rs | 14 +- pallets/subtensor/src/staking.rs | 11 +- pallets/subtensor/src/weights.rs | 6 +- 9 files changed, 279 insertions(+), 189 deletions(-) diff --git a/pallets/subtensor/src/block_step.rs b/pallets/subtensor/src/block_step.rs index 5583121f6..84530d279 100644 --- a/pallets/subtensor/src/block_step.rs +++ b/pallets/subtensor/src/block_step.rs @@ -86,7 +86,7 @@ impl Pallet { for (netuid, _) in as IterableStorageMap>::iter() { let Some(tuples_to_drain) = Self::get_loaded_emission_tuples(netuid) else { // There are no tuples to emit. 
- continue + continue; }; let mut total_emitted: u64 = 0; for (hotkey, server_amount, validator_amount) in tuples_to_drain.iter() { diff --git a/pallets/subtensor/src/delegate_info.rs b/pallets/subtensor/src/delegate_info.rs index 20f21aca4..afd540eba 100644 --- a/pallets/subtensor/src/delegate_info.rs +++ b/pallets/subtensor/src/delegate_info.rs @@ -108,10 +108,9 @@ impl Pallet { } pub fn get_delegated(delegatee_account_vec: Vec) -> Vec<(DelegateInfo, Compact)> { - let Ok(delegatee) = - T::AccountId::decode(&mut delegatee_account_vec.as_bytes_ref()) else { - return Vec::new(); // No delegates for invalid account - }; + let Ok(delegatee) = T::AccountId::decode(&mut delegatee_account_vec.as_bytes_ref()) else { + return Vec::new(); // No delegates for invalid account + }; let mut delegates: Vec<(DelegateInfo, Compact)> = Vec::new(); for delegate in as IterableStorageMap>::iter_keys() { diff --git a/pallets/subtensor/src/epoch.rs b/pallets/subtensor/src/epoch.rs index 3ba07369e..52e81355b 100644 --- a/pallets/subtensor/src/epoch.rs +++ b/pallets/subtensor/src/epoch.rs @@ -60,7 +60,8 @@ impl Pallet { // =========== let hotkeys: Vec<(u16, T::AccountId)> = - as IterableStorageDoubleMap>::iter_prefix(netuid).collect(); + as IterableStorageDoubleMap>::iter_prefix(netuid) + .collect(); log::trace!("hotkeys: {:?}", &hotkeys); // Access network stake as normalized vector. @@ -306,24 +307,35 @@ impl Pallet { // Column max-upscale EMA bonds for storage: max_i w_ij = 1. inplace_col_max_upscale(&mut ema_bonds); - new_validator_permits.iter().zip(validator_permits).zip(ema_bonds).enumerate().for_each(|(i, ((new_permit, validator_permit), ema_bond))| { - // Set bonds only if uid retains validator permit, otherwise clear bonds. - if *new_permit { - let new_bonds_row: Vec<(u16, u16)> = (0..n) - .zip(vec_fixed_proportions_to_u16(ema_bond.clone())) - .collect(); - Bonds::::insert(netuid, i as u16, new_bonds_row); - } else if validator_permit { - // Only overwrite the intersection. - let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; - Bonds::::insert(netuid, i as u16, new_empty_bonds_row); - } - }); + new_validator_permits + .iter() + .zip(validator_permits) + .zip(ema_bonds) + .enumerate() + .for_each(|(i, ((new_permit, validator_permit), ema_bond))| { + // Set bonds only if uid retains validator permit, otherwise clear bonds. + if *new_permit { + let new_bonds_row: Vec<(u16, u16)> = (0..n) + .zip(vec_fixed_proportions_to_u16(ema_bond.clone())) + .collect(); + Bonds::::insert(netuid, i as u16, new_bonds_row); + } else if validator_permit { + // Only overwrite the intersection. + let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; + Bonds::::insert(netuid, i as u16, new_empty_bonds_row); + } + }); - hotkeys.into_iter().map(|(uid_i, hotkey)| { - (hotkey, server_emission[uid_i as usize], validator_emission[uid_i as usize]) - }) - .collect() + hotkeys + .into_iter() + .map(|(uid_i, hotkey)| { + ( + hotkey, + server_emission[uid_i as usize], + validator_emission[uid_i as usize], + ) + }) + .collect() } // Calculates reward consensus values, then updates rank, trust, consensus, incentive, dividend, pruning_score, emission and bonds, and @@ -380,7 +392,8 @@ impl Pallet { // =========== let hotkeys: Vec<(u16, T::AccountId)> = - as IterableStorageDoubleMap>::iter_prefix(netuid).collect(); + as IterableStorageDoubleMap>::iter_prefix(netuid) + .collect(); log::trace!("hotkeys: {:?}", &hotkeys); // Access network stake as normalized vector. 
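// Illustrative sketch (simplified types, not taken from the pallet): building a
// dense, uid-indexed vector from collected (uid, value) pairs without direct
// indexing. `get_mut` silently skips an out-of-range uid instead of panicking.
fn fill_dense(n: usize, pairs: &[(u16, u64)]) -> Vec<u64> {
    let mut dense = vec![0u64; n];
    for (uid, value) in pairs {
        if let Some(slot) = dense.get_mut(*uid as usize) {
            *slot = *value;
        }
    }
    dense
}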
@@ -675,10 +688,16 @@ impl Pallet { }); // Emission tuples ( hotkeys, server_emission, validator_emission ) - hotkeys.into_iter().map(|(uid_i, hotkey)| { - (hotkey, server_emission[uid_i as usize], validator_emission[uid_i as usize]) - }) - .collect() + hotkeys + .into_iter() + .map(|(uid_i, hotkey)| { + ( + hotkey, + server_emission[uid_i as usize], + validator_emission[uid_i as usize], + ) + }) + .collect() } pub fn get_float_rho(netuid: u16) -> I32F32 { @@ -690,10 +709,11 @@ impl Pallet { pub fn get_normalized_stake(netuid: u16) -> Vec { let n = Self::get_subnetwork_n(netuid); - let mut stake_64: Vec = (0..n).map(|neuron_uid| I64F64::from_num( - Self::get_stake_for_uid_and_subnetwork(netuid, neuron_uid), - )) - .collect(); + let mut stake_64: Vec = (0..n) + .map(|neuron_uid| { + I64F64::from_num(Self::get_stake_for_uid_and_subnetwork(netuid, neuron_uid)) + }) + .collect(); inplace_normalize_64(&mut stake_64); let stake: Vec = vec_fixed64_to_fixed32(stake_64); stake @@ -701,14 +721,15 @@ impl Pallet { pub fn get_block_at_registration(netuid: u16) -> Vec { let n = Self::get_subnetwork_n(netuid); - let block_at_registration: Vec = (0..n).map(|neuron_uid| { - if Keys::::contains_key(netuid, neuron_uid) { - Self::get_neuron_block_at_registration(netuid, neuron_uid) - } else { - 0 - } - }) - .collect(); + let block_at_registration: Vec = (0..n) + .map(|neuron_uid| { + if Keys::::contains_key(netuid, neuron_uid) { + Self::get_neuron_block_at_registration(netuid, neuron_uid) + } else { + 0 + } + }) + .collect(); block_at_registration } diff --git a/pallets/subtensor/src/math.rs b/pallets/subtensor/src/math.rs index aa633fe61..9256206cd 100644 --- a/pallets/subtensor/src/math.rs +++ b/pallets/subtensor/src/math.rs @@ -264,14 +264,16 @@ pub fn inplace_normalize_64(x: &mut [I64F64]) { #[allow(dead_code)] pub fn vecdiv(x: &[I32F32], y: &[I32F32]) -> Vec { assert_eq!(x.len(), y.len()); - x.iter().zip(y).map(|(x_i, y_i)| { - if *y_i != 0 { - x_i / y_i - } else { - I32F32::from_num(0) - } - }) - .collect() + x.iter() + .zip(y) + .map(|(x_i, y_i)| { + if *y_i != 0 { + x_i / y_i + } else { + I32F32::from_num(0) + } + }) + .collect() } // Normalizes (sum to 1 except 0) each row (dim=0) of a matrix in-place. @@ -313,7 +315,10 @@ pub fn row_sum(x: &[Vec]) -> Vec { // Sum across each row (dim=0) of a sparse matrix. #[allow(dead_code)] pub fn row_sum_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec { - sparse_matrix.into_iter().map(|row| row.into_iter().map(|(_, value)| value).sum()).collect() + sparse_matrix + .into_iter() + .map(|row| row.into_iter().map(|(_, value)| value).sum()) + .collect() } // Sum across each column (dim=1) of a matrix. @@ -326,9 +331,13 @@ pub fn col_sum(x: &[Vec]) -> Vec { if cols == 0 { return vec![]; } - x.into_iter().fold(vec![I32F32::from_num(0); cols], |acc, next_row| { - acc.into_iter().zip(next_row).map(|(acc_elem, next_elem)| acc_elem + next_elem).collect() - }) + x.into_iter() + .fold(vec![I32F32::from_num(0); cols], |acc, next_row| { + acc.into_iter() + .zip(next_row) + .map(|(acc_elem, next_elem)| acc_elem + next_elem) + .collect() + }) } // Sum across each column (dim=1) of a sparse matrix. 
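// Illustrative sketch of the fold-and-zip column sum above, written for plain u64
// matrices (an assumption for readability) so the accumulation can be read in
// isolation; rows are assumed to have equal length.
fn col_sum_plain(rows: &[Vec<u64>]) -> Vec<u64> {
    let Some(first) = rows.first() else {
        return vec![];
    };
    let cols = first.len();
    rows.iter().fold(vec![0u64; cols], |acc, row| {
        // Add each row element-wise onto the running column totals.
        acc.into_iter()
            .zip(row)
            .map(|(acc_elem, &val)| acc_elem + val)
            .collect()
    })
}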
@@ -372,16 +381,21 @@ pub fn inplace_col_normalize(x: &mut [Vec]) { return; } let cols = first_row.len(); - let col_sums = x.into_iter().fold(vec![I32F32::from_num(0.0); cols], |acc, row| { - row.into_iter().zip(acc).map(|(&mut m_val, acc_val)| { - acc_val + m_val - }) - .collect() - }); - x.into_iter().for_each(|row| { - row.into_iter().zip(&col_sums).filter(|(_, col_sum)| **col_sum != I32F32::from_num(0_f32)).for_each(|(m_val, col_sum)| { - *m_val /= col_sum; + let col_sums = x + .into_iter() + .fold(vec![I32F32::from_num(0.0); cols], |acc, row| { + row.into_iter() + .zip(acc) + .map(|(&mut m_val, acc_val)| acc_val + m_val) + .collect() }); + x.into_iter().for_each(|row| { + row.into_iter() + .zip(&col_sums) + .filter(|(_, col_sum)| **col_sum != I32F32::from_num(0_f32)) + .for_each(|(m_val, col_sum)| { + *m_val /= col_sum; + }); }); } @@ -416,16 +430,21 @@ pub fn inplace_col_max_upscale(x: &mut [Vec]) { return; } let cols = first_row.len(); - let col_maxes = x.into_iter().fold(vec![I32F32::from_num(0_f32); cols], |acc, row| { - row.into_iter().zip(acc).map(|(m_val, acc_val)| { - acc_val.max(*m_val) - }) - .collect() - }); - x.into_iter().for_each(|row| { - row.into_iter().zip(&col_maxes).filter(|(_, col_max)| **col_max != I32F32::from_num(0)).for_each(|(m_val, col_max)| { - *m_val /= col_max; + let col_maxes = x + .into_iter() + .fold(vec![I32F32::from_num(0_f32); cols], |acc, row| { + row.into_iter() + .zip(acc) + .map(|(m_val, acc_val)| acc_val.max(*m_val)) + .collect() }); + x.into_iter().for_each(|row| { + row.into_iter() + .zip(&col_maxes) + .filter(|(_, col_max)| **col_max != I32F32::from_num(0)) + .for_each(|(m_val, col_max)| { + *m_val /= col_max; + }); }); } @@ -437,9 +456,12 @@ pub fn inplace_mask_vector(mask: &[bool], vector: &mut [I32F32]) { } assert_eq!(mask.len(), vector.len()); let zero: I32F32 = I32F32::from_num(0.0); - mask.into_iter().zip(vector).filter(|(m, _)| **m).for_each(|(_, v_elem)| { - *v_elem = zero; - }); + mask.into_iter() + .zip(vector) + .filter(|(m, _)| **m) + .for_each(|(_, v_elem)| { + *v_elem = zero; + }); } // Apply mask to matrix, mask=true will mask out, i.e. set to 0. @@ -453,11 +475,17 @@ pub fn inplace_mask_matrix(mask: &[Vec], matrix: &mut Vec>) { } assert_eq!(mask.len(), matrix.len()); let zero: I32F32 = I32F32::from_num(0.0); - mask.into_iter().zip(matrix).for_each(|(mask_row, matrix_row)| { - mask_row.into_iter().zip(matrix_row).filter(|(mask_elem, _)| **mask_elem).for_each(|(_, matrix_elem)| { - *matrix_elem = zero; + mask.into_iter() + .zip(matrix) + .for_each(|(mask_row, matrix_row)| { + mask_row + .into_iter() + .zip(matrix_row) + .filter(|(mask_elem, _)| **mask_elem) + .for_each(|(_, matrix_elem)| { + *matrix_elem = zero; + }); }); - }); } // Apply row mask to matrix, mask=true will mask out, i.e. set to 0. @@ -469,11 +497,14 @@ pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec]) { let cols = first_row.len(); assert_eq!(mask.len(), matrix.len()); let zero: I32F32 = I32F32::from_num(0); - matrix.into_iter().zip(mask).for_each(|(row_elem, mask_row)| { - if *mask_row { - *row_elem = vec![zero; cols]; - } - }); + matrix + .into_iter() + .zip(mask) + .for_each(|(row_elem, mask_row)| { + if *mask_row { + *row_elem = vec![zero; cols]; + } + }); } // Mask out the diagonal of the input matrix in-place. 
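// Illustrative sketch of the zip + filter masking used above, on plain integers
// (simplified types, not the pallet's fixed-point values): flagged entries are
// zeroed without any direct indexing.
fn mask_vector_plain(mask: &[bool], values: &mut [u64]) {
    assert_eq!(mask.len(), values.len());
    mask.iter()
        .zip(values.iter_mut())
        .filter(|(masked, _)| **masked)
        .for_each(|(_, value)| *value = 0);
}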
@@ -503,23 +534,32 @@ pub fn mask_rows_sparse( sparse_matrix: &[Vec<(u16, I32F32)>], ) -> Vec> { assert_eq!(sparse_matrix.len(), mask.len()); - mask.into_iter().zip(sparse_matrix).map(|(mask_elem, sparse_row)| { - if *mask_elem { - vec![] - } else { - sparse_row.clone() - } - }) - .collect() + mask.into_iter() + .zip(sparse_matrix) + .map(|(mask_elem, sparse_row)| { + if *mask_elem { + vec![] + } else { + sparse_row.clone() + } + }) + .collect() } // Return a new sparse matrix with a masked out diagonal of input sparse matrix. #[allow(dead_code)] pub fn mask_diag_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec> { - sparse_matrix.into_iter().enumerate().map(|(i, sparse_row)| { - sparse_row.into_iter().filter(|(j, _)| i != (*j as usize)).copied().collect() - }) - .collect() + sparse_matrix + .into_iter() + .enumerate() + .map(|(i, sparse_row)| { + sparse_row + .into_iter() + .filter(|(j, _)| i != (*j as usize)) + .copied() + .collect() + }) + .collect() } // Remove cells from sparse matrix where the mask function of two vectors is true. @@ -551,10 +591,11 @@ pub fn row_hadamard(matrix: &[Vec], vector: &[I32F32]) -> Vec], vector: &[I32F32], ) -> Vec> { - sparse_matrix.into_iter().zip(vector).map(|(sparse_row, vec_val)| { - sparse_row.into_iter().map(|(j, value)| (*j, *value * *vec_val)).collect() - }) - .collect() + sparse_matrix + .into_iter() + .zip(vector) + .map(|(sparse_row, vec_val)| { + sparse_row + .into_iter() + .map(|(j, value)| (*j, *value * *vec_val)) + .collect() + }) + .collect() } // Row-wise matrix-vector product, column-wise sum: result_j = SUM(i) vector_i * matrix_ij. @@ -580,15 +627,20 @@ pub fn matmul(matrix: &[Vec], vector: &[I32F32]) -> Vec { return vec![]; } assert!(matrix.len() == vector.len()); - matrix.into_iter().zip(vector).fold(vec![I32F32::from_num(0_f32); cols], |acc, (row, vec_val)| { - row.into_iter().zip(acc).map(|(m_val, acc_val)| { - // Compute ranks: r_j = SUM(i) w_ij * s_i - // Compute trust scores: t_j = SUM(i) w_ij * s_i - // result_j = SUM(i) vector_i * matrix_ij - acc_val + vec_val * m_val - }) - .collect() - }) + matrix.into_iter().zip(vector).fold( + vec![I32F32::from_num(0_f32); cols], + |acc, (row, vec_val)| { + row.into_iter() + .zip(acc) + .map(|(m_val, acc_val)| { + // Compute ranks: r_j = SUM(i) w_ij * s_i + // Compute trust scores: t_j = SUM(i) w_ij * s_i + // result_j = SUM(i) vector_i * matrix_ij + acc_val + vec_val * m_val + }) + .collect() + }, + ) } // Row-wise matrix-vector product, column-wise sum: result_j = SUM(i) vector_i * matrix_ij. @@ -602,15 +654,20 @@ pub fn matmul_64(matrix: &[Vec], vector: &[I64F64]) -> Vec { return vec![]; } assert!(matrix.len() == vector.len()); - matrix.into_iter().zip(vector).fold(vec![I64F64::from_num(0.0); cols], |acc, (row, vec_val)| { - row.into_iter().zip(acc).map(|(m_val, acc_val)| { - // Compute ranks: r_j = SUM(i) w_ij * s_i - // Compute trust scores: t_j = SUM(i) w_ij * s_i - // result_j = SUM(i) vector_i * matrix_ij - acc_val + vec_val * m_val + matrix + .into_iter() + .zip(vector) + .fold(vec![I64F64::from_num(0.0); cols], |acc, (row, vec_val)| { + row.into_iter() + .zip(acc) + .map(|(m_val, acc_val)| { + // Compute ranks: r_j = SUM(i) w_ij * s_i + // Compute trust scores: t_j = SUM(i) w_ij * s_i + // result_j = SUM(i) vector_i * matrix_ij + acc_val + vec_val * m_val + }) + .collect() }) - .collect() - }) } // Column-wise matrix-vector product, row-wise sum: result_i = SUM(j) vector_j * matrix_ij. 
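// Illustrative sketch of the fold-based column-wise accumulation used by the
// matmul helpers above, on plain f64 values (an assumption; the pallet uses
// substrate-fixed types): result_j = SUM(i) vector_i * matrix_ij.
fn matmul_plain(matrix: &[Vec<f64>], vector: &[f64]) -> Vec<f64> {
    let Some(first_row) = matrix.first() else {
        return vec![];
    };
    let cols = first_row.len();
    assert_eq!(matrix.len(), vector.len());
    matrix.iter().zip(vector).fold(vec![0.0; cols], |acc, (row, v)| {
        // Scale the row by its vector entry and add it onto the running totals.
        acc.into_iter()
            .zip(row)
            .map(|(acc_elem, m)| acc_elem + v * m)
            .collect()
    })
}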
@@ -623,15 +680,19 @@ pub fn matmul_transpose(matrix: &[Vec], vector: &[I32F32]) -> Vec], col_threshold: &[I32F32]) { x.into_iter().for_each(|row| { - row.into_iter().zip(col_threshold).for_each(|(value, threshold)| { - *value = *threshold.min(value); - }); + row.into_iter() + .zip(col_threshold) + .for_each(|(value, threshold)| { + *value = *threshold.min(value); + }); }); } @@ -710,17 +773,13 @@ pub fn clip( upper: I32F32, lower: I32F32, ) -> Vec> { - x.into_iter().map(|row| { - row.into_iter().map(|elem| { - if *elem >= threshold { - upper - } else { - lower - } + x.into_iter() + .map(|row| { + row.into_iter() + .map(|elem| if *elem >= threshold { upper } else { lower }) + .collect() }) .collect() - }) - .collect() } // Set inplace matrix values below threshold to lower, and equal-above to upper. @@ -728,11 +787,7 @@ pub fn clip( pub fn inplace_clip(x: &mut [Vec], threshold: I32F32, upper: I32F32, lower: I32F32) { x.into_iter().for_each(|row| { row.into_iter().for_each(|elem| { - *elem = if *elem >= threshold { - upper - } else { - lower - }; + *elem = if *elem >= threshold { upper } else { lower }; }); }); } @@ -746,17 +801,20 @@ pub fn clip_sparse( upper: I32F32, lower: I32F32, ) -> Vec> { - sparse_matrix.into_iter().map(|row| { - row.into_iter().map(|(j, value)| { - if *value < threshold { - (*j, lower) - } else { - (*j, upper) - } + sparse_matrix + .into_iter() + .map(|row| { + row.into_iter() + .map(|(j, value)| { + if *value < threshold { + (*j, lower) + } else { + (*j, upper) + } + }) + .collect() }) .collect() - }) - .collect() } // Stake-weighted median score finding algorithm, based on a mid pivot binary search. @@ -935,11 +993,16 @@ pub fn hadamard(mat1: &[Vec], mat2: &[Vec]) -> Vec> if first_row.is_empty() { return vec![vec![]]; } - mat1.iter().zip(mat2).map(|(row1, row2)| { - assert!(row1.len() == row2.len()); - row1.iter().zip(row2).map(|(elem1, elem2)| elem1 * elem2).collect() - }) - .collect() + mat1.iter() + .zip(mat2) + .map(|(row1, row2)| { + assert!(row1.len() == row2.len()); + row1.iter() + .zip(row2) + .map(|(elem1, elem2)| elem1 * elem2) + .collect() + }) + .collect() } // Element-wise product of two sparse matrices. @@ -984,13 +1047,16 @@ pub fn mat_ema(new: &[Vec], old: &[Vec], alpha: I32F32) -> Vec= threshold of an input sparse matrix. 
#[allow(dead_code)] pub fn sparse_threshold(w: &[Vec<(u16, I32F32)>], threshold: I32F32) -> Vec> { - w.into_iter().map(|row| { - row.into_iter().filter(|(_, weight)| *weight >= threshold).copied().collect() - }) - .collect() + w.into_iter() + .map(|row| { + row.into_iter() + .filter(|(_, weight)| *weight >= threshold) + .copied() + .collect() + }) + .collect() } #[cfg(test)] diff --git a/pallets/subtensor/src/migration.rs b/pallets/subtensor/src/migration.rs index e62a03ecd..d698340c0 100644 --- a/pallets/subtensor/src/migration.rs +++ b/pallets/subtensor/src/migration.rs @@ -89,7 +89,8 @@ pub fn migrate_transfer_ownership_to_foundation(coldkey: [u8; 32]) -> // We have to decode this using a byte slice as we don't have crypto-std let coldkey_account: ::AccountId = - ::AccountId::decode(&mut &coldkey[..]).expect("coldkey is 32-byte array; qed"); + ::AccountId::decode(&mut &coldkey[..]) + .expect("coldkey is 32-byte array; qed"); info!("Foundation coldkey: {:?}", coldkey_account); let current_block = Pallet::::get_current_block_as_u64(); diff --git a/pallets/subtensor/src/root.rs b/pallets/subtensor/src/root.rs index f718f84de..635f93266 100644 --- a/pallets/subtensor/src/root.rs +++ b/pallets/subtensor/src/root.rs @@ -263,7 +263,11 @@ impl Pallet { for (netuid, weight_ij) in &weights_i { let idx = uid_i as usize; if let Some(weight) = weights.get_mut(idx) { - if let Some((w, _)) = weight.into_iter().zip(&subnet_list).find(|(_, subnet)| *subnet == netuid) { + if let Some((w, _)) = weight + .into_iter() + .zip(&subnet_list) + .find(|(_, subnet)| *subnet == netuid) + { *w = I64F64::from_num(*weight_ij); } } @@ -394,7 +398,9 @@ impl Pallet { log::debug!("C:\n{:?}\n", &consensus); let mut weighted_emission = vec![I64F64::from_num(0); total_networks as usize]; - for ((emission, consensus_i), rank) in weighted_emission.iter_mut().zip(&consensus).zip(&ranks) { + for ((emission, consensus_i), rank) in + weighted_emission.iter_mut().zip(&consensus).zip(&ranks) + { *emission = *consensus_i * (*rank); } inplace_normalize_64(&mut weighted_emission); diff --git a/pallets/subtensor/src/stake_info.rs b/pallets/subtensor/src/stake_info.rs index 36dc48288..d66235657 100644 --- a/pallets/subtensor/src/stake_info.rs +++ b/pallets/subtensor/src/stake_info.rs @@ -47,10 +47,9 @@ impl Pallet { if coldkey_account_vec.len() != 32 { continue; // Invalid coldkey } - let Ok(coldkey) = - T::AccountId::decode(&mut coldkey_account_vec.as_bytes_ref()) else { - continue; - }; + let Ok(coldkey) = T::AccountId::decode(&mut coldkey_account_vec.as_bytes_ref()) else { + continue; + }; coldkeys.push(coldkey); } @@ -66,10 +65,9 @@ impl Pallet { return Vec::new(); // Invalid coldkey } - let Ok(coldkey) = - T::AccountId::decode(&mut coldkey_account_vec.as_bytes_ref()) else { - return Vec::new(); - }; + let Ok(coldkey) = T::AccountId::decode(&mut coldkey_account_vec.as_bytes_ref()) else { + return Vec::new(); + }; let stake_info = Self::_get_stake_info_for_coldkeys(vec![coldkey]); if stake_info.is_empty() { diff --git a/pallets/subtensor/src/staking.rs b/pallets/subtensor/src/staking.rs index 9a0005b6c..a5e9f2c8f 100644 --- a/pallets/subtensor/src/staking.rs +++ b/pallets/subtensor/src/staking.rs @@ -641,17 +641,10 @@ impl Pallet { ) { // Remove the stake from the coldkey - hotkey pairing. - Self::decrease_stake_on_coldkey_hotkey_account( - &delegate_coldkey_i, - hotkey, - stake_i, - ); + Self::decrease_stake_on_coldkey_hotkey_account(&delegate_coldkey_i, hotkey, stake_i); // Add the balance to the coldkey account. 
- Self::add_balance_to_coldkey_account( - &delegate_coldkey_i, - stake_i, - ); + Self::add_balance_to_coldkey_account(&delegate_coldkey_i, stake_i); } } } diff --git a/pallets/subtensor/src/weights.rs b/pallets/subtensor/src/weights.rs index dbf735fe6..c912af61d 100644 --- a/pallets/subtensor/src/weights.rs +++ b/pallets/subtensor/src/weights.rs @@ -122,7 +122,7 @@ impl Pallet { ); // --- 8. Get the neuron uid of associated hotkey on network netuid. - + let neuron_uid = Self::get_uid_for_net_and_hotkey(netuid, &hotkey)?; // --- 9. Ensure the uid is not setting weights faster than the weights_set_rate_limit. @@ -322,7 +322,9 @@ impl Pallet { if weights.len() != 1 { return false; } - let Some(first_uid) = uids.first() else { return false; }; + let Some(first_uid) = uids.first() else { + return false; + }; if uid != *first_uid { return false; } From f61f92e03222078b6fe2120a68c364fe5abb7331 Mon Sep 17 00:00:00 2001 From: Keith Date: Tue, 30 Apr 2024 13:54:29 +0800 Subject: [PATCH 10/10] Require fungible::Mutate on Currency item for benchmarking --- pallets/registry/src/benchmarking.rs | 1 + pallets/registry/src/lib.rs | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/pallets/registry/src/benchmarking.rs b/pallets/registry/src/benchmarking.rs index 4a908cd20..eadac7865 100644 --- a/pallets/registry/src/benchmarking.rs +++ b/pallets/registry/src/benchmarking.rs @@ -6,6 +6,7 @@ use super::*; use crate::Pallet as Registry; use frame_benchmarking::v1::account; use frame_benchmarking::v2::*; +use frame_support::traits::tokens::fungible::Mutate; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; diff --git a/pallets/registry/src/lib.rs b/pallets/registry/src/lib.rs index 5341f3f8c..e54386350 100644 --- a/pallets/registry/src/lib.rs +++ b/pallets/registry/src/lib.rs @@ -39,7 +39,8 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; // Currency type that will be used to place deposits on neurons - type Currency: fungible::MutateHold; + type Currency: fungible::Mutate + + fungible::MutateHold; // Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo;