From d30a76f38e374fb8b600d79f3ae19fea8e839a9d Mon Sep 17 00:00:00 2001 From: Evgeny Svirsky Date: Wed, 6 May 2026 12:55:00 +0200 Subject: [PATCH 01/11] Dynamic tempo implementation --- pallets/admin-utils/src/lib.rs | 5 +- pallets/subtensor/src/coinbase/block_step.rs | 6 +- pallets/subtensor/src/coinbase/mod.rs | 1 + pallets/subtensor/src/coinbase/root.rs | 3 + .../subtensor/src/coinbase/run_coinbase.rs | 94 ++++++++++---- .../subtensor/src/coinbase/tempo_control.rs | 109 ++++++++++++++++ pallets/subtensor/src/epoch/run_epoch.rs | 28 ++++- pallets/subtensor/src/lib.rs | 39 ++++++ pallets/subtensor/src/macros/dispatches.rs | 41 ++++++ pallets/subtensor/src/macros/errors.rs | 6 + pallets/subtensor/src/macros/events.rs | 32 +++++ pallets/subtensor/src/macros/hooks.rs | 4 +- .../src/migrations/migrate_dynamic_tempo.rs | 119 ++++++++++++++++++ pallets/subtensor/src/migrations/mod.rs | 1 + pallets/subtensor/src/subnets/subnet.rs | 6 + pallets/subtensor/src/utils/misc.rs | 34 ++++- pallets/subtensor/src/utils/rate_limiting.rs | 6 + runtime/src/lib.rs | 2 +- 18 files changed, 498 insertions(+), 38 deletions(-) create mode 100644 pallets/subtensor/src/coinbase/tempo_control.rs create mode 100644 pallets/subtensor/src/migrations/migrate_dynamic_tempo.rs diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 4688b1f22f..1990fe8968 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -975,7 +975,10 @@ pub mod pallet { pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - pallet_subtensor::Pallet::::set_tempo(netuid, tempo); + pallet_subtensor::Pallet::::set_tempo_unchecked(netuid, tempo); + // Cycle reset on every successful set_tempo + let now = pallet_subtensor::Pallet::::get_current_block_as_u64(); + pallet_subtensor::LastEpochBlock::::insert(netuid, now); log::debug!("TempoSet( netuid: {netuid:?} tempo: {tempo:?} ) "); Ok(()) } diff --git 
a/pallets/subtensor/src/coinbase/block_step.rs b/pallets/subtensor/src/coinbase/block_step.rs index fac924ccf4..0eadbf5bf2 100644 --- a/pallets/subtensor/src/coinbase/block_step.rs +++ b/pallets/subtensor/src/coinbase/block_step.rs @@ -36,9 +36,11 @@ impl Pallet { } fn try_set_pending_children(block_number: u64) { + // Called *after* `run_coinbase` has advanced `LastEpochBlock` for any + // subnet whose epoch slot fired this block — `should_run_epoch` is no + // longer true. Detect "epoch just fired" by `LastEpochBlock == block`. for netuid in Self::get_all_subnet_netuids() { - if Self::should_run_epoch(netuid, block_number) { - // Set pending children on the epoch. + if LastEpochBlock::::get(netuid) == block_number { Self::do_set_pending_children(netuid); } } diff --git a/pallets/subtensor/src/coinbase/mod.rs b/pallets/subtensor/src/coinbase/mod.rs index a5475674a7..d621d292b0 100644 --- a/pallets/subtensor/src/coinbase/mod.rs +++ b/pallets/subtensor/src/coinbase/mod.rs @@ -6,3 +6,4 @@ pub mod root; pub mod run_coinbase; pub mod subnet_emissions; pub mod tao; +pub mod tempo_control; diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index b2926323db..b0a1cf1c04 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -284,6 +284,9 @@ impl Pallet { MaxAllowedUids::::remove(netuid); ImmunityPeriod::::remove(netuid); ActivityCutoff::::remove(netuid); + ActivityCutoffFactorMilli::::remove(netuid); + LastEpochBlock::::remove(netuid); + PendingEpochAt::::remove(netuid); MinAllowedWeights::::remove(netuid); RegistrationsThisInterval::::remove(netuid); POWRegistrationsThisInterval::::remove(netuid); diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 60abfd1145..62a31a99c7 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -64,7 +64,14 @@ impl Pallet { let 
emissions_to_distribute = Self::drain_pending(&subnets, current_block); // --- 6. Distribute the emissions to the subnets. + // Bonds masking inside `distribute_emission` reads `LastMechansimStepBlock` and + // must see the previous successful run, so we delay the write until after. Self::distribute_emissions_to_subnets(&emissions_to_distribute); + + // --- 7. Mark each successful epoch run as the last mechanism step. + for netuid in emissions_to_distribute.keys() { + LastMechansimStepBlock::::insert(*netuid, current_block); + } } pub fn inject_and_maybe_swap( @@ -318,19 +325,35 @@ impl Pallet { NetUid, (AlphaBalance, AlphaBalance, AlphaBalance, AlphaBalance), > = BTreeMap::new(); - // --- Drain pending emissions for all subnets hat are at their tempo. - // Run the epoch for *all* subnets, even if we don't emit anything. + // Per-block cap on number of epochs that may run; the rest are deferred 1 block forward + // by setting `PendingEpochAt`. + let mut epochs_run_this_block: u32 = 0; + for &netuid in subnets.iter() { - // Increment blocks since last step. + // Increment blocks since last *successful* step (existing semantics). BlocksSinceLastStep::::mutate(netuid, |total| *total = total.saturating_add(1)); - // Run the epoch if applicable. - if Self::should_run_epoch(netuid, current_block) - && Self::is_epoch_input_state_consistent(netuid) - { - // Restart counters. + if !Self::should_run_epoch(netuid, current_block) { + continue; + } + + // Per-block cap — defer if already at limit. 
+ if epochs_run_this_block >= MAX_EPOCHS_PER_BLOCK { + let next_block = current_block.saturating_add(1); + PendingEpochAt::::insert(netuid, next_block); + Self::deposit_event(Event::EpochDeferred { + netuid, + from_block: current_block, + to_block: next_block, + }); + continue; + } + + if Self::is_epoch_input_state_consistent(netuid) { + // Reset blocks-since counter; LastMechansimStepBlock is written + // post-distribute (see the caller), so bonds masking can read the + // previous successful run. BlocksSinceLastStep::::insert(netuid, 0); - LastMechansimStepBlock::::insert(netuid, current_block); // Get and drain the subnet pending emission. let pending_server_alpha = PendingServerEmission::::get(netuid); @@ -357,7 +380,19 @@ impl Pallet { owner_cut, ), ); + epochs_run_this_block = epochs_run_this_block.saturating_add(1); + } else { + // Schedule advances below; execution skipped. Pending emissions accumulate + // and will be drained by the next successful epoch. + Self::deposit_event(Event::EpochSkippedDueToInconsistentState { + netuid, + block: current_block, + }); } + + // Advance the schedule unconditionally — the slot is consumed. + LastEpochBlock::::insert(netuid, current_block); + PendingEpochAt::::insert(netuid, 0); } emissions_to_distribute } @@ -993,28 +1028,35 @@ impl Pallet { /// # Returns /// * `bool` - True if the epoch should run, false otherwise. 
pub fn should_run_epoch(netuid: NetUid, current_block: u64) -> bool { - Self::blocks_until_next_epoch(netuid, Self::get_tempo(netuid), current_block) == 0 + let tempo = Self::get_tempo(netuid); + if tempo == 0 { + return false; + } + let pending = PendingEpochAt::::get(netuid); + if pending > 0 && current_block >= pending { + return true; + } + if BlocksSinceLastStep::::get(netuid) > MAX_TEMPO as u64 { + return true; + } + let last = LastEpochBlock::::get(netuid); + let blocks_since = current_block.saturating_sub(last); + blocks_since > tempo as u64 } - /// Helper function which returns the number of blocks remaining before we will run the epoch on this - /// network. Networks run their epoch when (block_number + netuid + 1 ) % (tempo + 1) = 0 - /// tempo | netuid | # first epoch block - /// 1 0 0 - /// 1 1 1 - /// 2 0 1 - /// 2 1 0 - /// 100 0 99 - /// 100 1 98 - /// Special case: tempo = 0, the network never runs. - /// + /// Returns the number of blocks remaining before the next automatic epoch under the + /// stateful scheduler (period `tempo + 1`, anchored on `LastEpochBlock`). Used by the + /// admin-freeze-window predicate and external tooling. Returns `u64::MAX` when + /// `tempo == 0` (legacy defensive short-circuit). pub fn blocks_until_next_epoch(netuid: NetUid, tempo: u16, block_number: u64) -> u64 { if tempo == 0 { return u64::MAX; } - let netuid_plus_one = (u16::from(netuid) as u64).saturating_add(1); - let tempo_plus_one = (tempo as u64).saturating_add(1); - let adjusted_block = block_number.wrapping_add(netuid_plus_one); - let remainder = adjusted_block.checked_rem(tempo_plus_one).unwrap_or(0); - (tempo as u64).saturating_sub(remainder) + let last = LastEpochBlock::::get(netuid); + // Period is `tempo + 1`: next firing at `last + tempo + 1`. 
+ let next_auto = last + .saturating_add(tempo as u64) + .saturating_add(1); + next_auto.saturating_sub(block_number) } } diff --git a/pallets/subtensor/src/coinbase/tempo_control.rs b/pallets/subtensor/src/coinbase/tempo_control.rs new file mode 100644 index 0000000000..9b7624a233 --- /dev/null +++ b/pallets/subtensor/src/coinbase/tempo_control.rs @@ -0,0 +1,109 @@ +use super::*; +use crate::Error; +use frame_support::pallet_prelude::DispatchResult; +use sp_runtime::DispatchError; +use subtensor_runtime_common::NetUid; + +use crate::system::pallet_prelude::OriginFor; +use crate::utils::rate_limiting::{Hyperparameter, TransactionType}; + +impl Pallet { + /// Owner-side `set_tempo` implementation. See spec §5.1. + pub fn do_set_tempo( + origin: OriginFor, + netuid: NetUid, + tempo: u16, + ) -> DispatchResult { + let who = Self::ensure_subnet_owner(origin, netuid)?; + + ensure!( + (MIN_TEMPO..=MAX_TEMPO).contains(&tempo), + Error::::TempoOutOfBounds + ); + + Self::ensure_admin_window_open(netuid)?; + + let tx = TransactionType::TempoUpdate; + ensure!( + tx.passes_rate_limit_on_subnet::(&who, netuid), + Error::::TxRateLimitExceeded + ); + + let now = Self::get_current_block_as_u64(); + + Tempo::::insert(netuid, tempo); + // Cycle reset on every successful set_tempo + LastEpochBlock::::insert(netuid, now); + + tx.set_last_block_on_subnet::(&who, netuid, now); + + Self::deposit_event(Event::TempoSet(netuid, tempo)); + Ok(()) + } + + /// Owner-side `set_activity_cutoff_factor` implementation. See spec §5.2. 
+ pub fn do_set_activity_cutoff_factor( + origin: OriginFor, + netuid: NetUid, + factor_milli: u32, + ) -> DispatchResult { + let who = Self::ensure_subnet_owner(origin, netuid)?; + + ensure!( + (MIN_ACTIVITY_CUTOFF_FACTOR_MILLI..=MAX_ACTIVITY_CUTOFF_FACTOR_MILLI) + .contains(&factor_milli), + Error::::ActivityCutoffFactorMilliOutOfBounds + ); + + Self::ensure_admin_window_open(netuid)?; + + let tx = TransactionType::OwnerHyperparamUpdate(Hyperparameter::ActivityCutoffFactorMilli); + ensure!( + tx.passes_rate_limit_on_subnet::(&who, netuid), + Error::::TxRateLimitExceeded + ); + + let now = Self::get_current_block_as_u64(); + + Self::set_activity_cutoff_factor_milli(netuid, factor_milli); + tx.set_last_block_on_subnet::(&who, netuid, now); + + Ok(()) + } + + /// Owner-side `trigger_epoch` implementation. See spec §5.3. + /// Schedules the triggered epoch to fire after `AdminFreezeWindow` blocks; that + /// countdown engages the freeze window for the subnet via `is_in_admin_freeze_window`. + pub fn do_trigger_epoch( + origin: OriginFor, + netuid: NetUid, + ) -> Result<(), DispatchError> { + let who = Self::ensure_subnet_owner(origin, netuid)?; + + // No `ensure_admin_window_open` here: trigger *defines* the next epoch. 
+ ensure!( + PendingEpochAt::::get(netuid) == 0, + Error::::EpochTriggerAlreadyPending + ); + + let tx = TransactionType::OwnerHyperparamUpdate(Hyperparameter::TriggerEpoch); + ensure!( + tx.passes_rate_limit_on_subnet::(&who, netuid), + Error::::TxRateLimitExceeded + ); + + let now = Self::get_current_block_as_u64(); + let window = AdminFreezeWindow::::get() as u64; + let fires_at = now.saturating_add(window); + + PendingEpochAt::::insert(netuid, fires_at); + tx.set_last_block_on_subnet::(&who, netuid, now); + + Self::deposit_event(Event::EpochTriggered { + netuid, + by: who, + fires_at, + }); + Ok(()) + } +} diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 962c5bbbb4..6aa7b2307e 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -169,7 +169,7 @@ impl Pallet { log::trace!("tempo: {tempo:?}"); // Get activity cutoff. - let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64; + let activity_cutoff: u64 = Self::get_activity_cutoff_blocks(netuid); log::trace!("activity_cutoff: {activity_cutoff:?}"); // Last update vector. @@ -205,7 +205,13 @@ impl Pallet { // Recently registered matrix, recently_ij=True if last_tempo was *before* j was last registered. // Mask if: the last tempo block happened *before* the registration block // ==> last_tempo <= registered - let last_tempo: u64 = current_block.saturating_sub(tempo); + // For dynamic tempo - we pick previous-successful-epoch block: `LastMechansimStepBlock + 1` + let lms = LastMechansimStepBlock::::get(netuid); + let last_tempo: u64 = if lms == 0 { + current_block.saturating_sub(tempo) + } else { + lms.saturating_add(1) + }; let recently_registered: Vec = block_at_registration .iter() .map(|registered| last_tempo <= *registered) .collect(); @@ -595,7 +601,7 @@ impl Pallet { log::trace!("tempo:\n{tempo:?}\n"); // Get activity cutoff. 
- let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64; + let activity_cutoff: u64 = Self::get_activity_cutoff_blocks(netuid); log::trace!("activity_cutoff: {activity_cutoff:?}"); // Last update vector. @@ -819,7 +825,13 @@ impl Pallet { // Remove bonds referring to neurons that have registered since last tempo. // Mask if: the last tempo block happened *before* the registration block // ==> last_tempo <= registered - let last_tempo: u64 = current_block.saturating_sub(tempo); + // For dynamic tempo - we pick previous-successful-epoch block: `LastMechansimStepBlock + 1` + let lms = LastMechansimStepBlock::::get(netuid); + let last_tempo: u64 = if lms == 0 { + current_block.saturating_sub(tempo) + } else { + lms.saturating_add(1) + }; bonds = scalar_vec_mask_sparse_matrix( &bonds, last_tempo, @@ -859,7 +871,13 @@ impl Pallet { // Remove bonds referring to neurons that have registered since last tempo. // Mask if: the last tempo block happened *before* the registration block // ==> last_tempo <= registered - let last_tempo: u64 = current_block.saturating_sub(tempo); + // For dynamic tempo - we pick previous-successful-epoch block: `LastMechansimStepBlock + 1` + let lms = LastMechansimStepBlock::::get(netuid); + let last_tempo: u64 = if lms == 0 { + current_block.saturating_sub(tempo) + } else { + lms.saturating_add(1) + }; bonds = scalar_vec_mask_sparse_matrix( &bonds, last_tempo, diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 75735c7471..6eda0000f7 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1731,6 +1731,45 @@ pub mod pallet { #[pallet::storage] pub type Tempo = StorageMap<_, Identity, NetUid, u16, ValueQuery, DefaultTempo>; + /// Lower bound for owner-set tempo. Also the fixed cooldown for `set_tempo`. + pub const MIN_TEMPO: u16 = 360; + /// Upper bound for owner-set tempo (≈ 7 days at 12 s/block). 
+ pub const MAX_TEMPO: u16 = 50_400; + /// Lower bound for activity-cutoff factor (per-mille). 1_000 = one full tempo. + pub const MIN_ACTIVITY_CUTOFF_FACTOR_MILLI: u32 = 1_000; + /// Upper bound for activity-cutoff factor (per-mille). 50_000 = 50 tempos. + pub const MAX_ACTIVITY_CUTOFF_FACTOR_MILLI: u32 = 50_000; + /// Default activity-cutoff factor (per-mille). 13_889 ≈ legacy 5000-block cutoff + /// at default tempo 360 (`13_889 * 360 / 1000 = 5_000`, exact via ceiling rounding). + pub const INITIAL_ACTIVITY_CUTOFF_FACTOR_MILLI: u32 = 13_889; + /// Per-block cap on number of epochs that may execute in a single `block_step`. + pub const MAX_EPOCHS_PER_BLOCK: u32 = 2; + + /// Default value for activity-cutoff factor (per-mille). + #[pallet::type_value] + pub fn DefaultActivityCutoffFactorMilli() -> u32 { + INITIAL_ACTIVITY_CUTOFF_FACTOR_MILLI + } + + /// --- MAP ( netuid ) --> last epoch attempt block (consumed slot). + /// Drives normal-cadence scheduling and the admin freeze window. + /// Advances on every `should_run_epoch == true` slot — including consistency-skipped slots — + /// and on a successful `set_tempo` (cycle reset). + #[pallet::storage] + pub type LastEpochBlock = StorageMap<_, Identity, NetUid, u64, ValueQuery, DefaultZeroU64>; + + /// --- MAP ( netuid ) --> block at which a manually triggered epoch should fire. + /// `0` means no trigger pending. Cleared after the triggered epoch runs. + #[pallet::storage] + pub type PendingEpochAt = + StorageMap<_, Identity, NetUid, u64, ValueQuery, DefaultZeroU64>; + + /// --- MAP ( netuid ) --> activity-cutoff factor in per-mille epochs (1/1000 granularity). + /// Effective cutoff in blocks = `(factor × tempo) / 1000`, clamped to ≥ 1. 
+ #[pallet::storage] + pub type ActivityCutoffFactorMilli = + StorageMap<_, Identity, NetUid, u32, ValueQuery, DefaultActivityCutoffFactorMilli>; + /// ============================ /// ==== Subnet Parameters ===== /// ============================ diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index a98578d813..668ce70b68 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -2594,5 +2594,46 @@ mod dispatches { let coldkey = ensure_signed(origin)?; Self::do_move_lock(&coldkey, &destination_hotkey, netuid) } + + /// Owner-side `set_tempo`. Validates `[MinTempo, MaxTempo]`, applies a fixed + /// `MinTempo`-block cooldown via `TransactionType::TempoUpdate`, respects the admin + /// freeze window, and resets the cycle (`LastEpochBlock = current_block`) on success. + #[pallet::call_index(139)] + #[pallet::weight(Weight::from_parts(20_000, 0) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)))] // TODO: add benchmarks and update weights + pub fn set_tempo( + origin: OriginFor, + netuid: NetUid, + tempo: u16, + ) -> DispatchResult { + Self::do_set_tempo(origin, netuid, tempo) + } + + /// Owner-side `set_activity_cutoff_factor`. Per-mille (1/1000) units; `cutoff_blocks + /// = (factor × tempo) / 1000`. Validates `[MinActivityCutoffFactorMilli, + /// MaxActivityCutoffFactorMilli]`, rate-limited via the existing + /// `OwnerHyperparamUpdate` pattern, respects the admin freeze window. + #[pallet::call_index(140)] + #[pallet::weight(Weight::from_parts(15_000, 0) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)))] // TODO: add benchmarks and update weights + pub fn set_activity_cutoff_factor( + origin: OriginFor, + netuid: NetUid, + factor_milli: u32, + ) -> DispatchResult { + Self::do_set_activity_cutoff_factor(origin, netuid, factor_milli) + } + + /// Owner-side `trigger_epoch`. 
Schedules an epoch to fire after `AdminFreezeWindow` + /// blocks. Rate-limited via the existing `OwnerHyperparamUpdate` pattern. + #[pallet::call_index(141)] + #[pallet::weight(Weight::from_parts(15_000, 0) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)))] // TODO: add benchmarks and update weights + pub fn trigger_epoch(origin: OriginFor, netuid: NetUid) -> DispatchResult { + Self::do_trigger_epoch(origin, netuid) + } } } diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index cb120b56b5..e5537816cb 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -305,5 +305,11 @@ mod errors { CannotUseSystemAccount, /// Trying to unlock more than locked UnlockAmountTooHigh, + /// Tempo value out of `[MinTempo, MaxTempo]` bounds. + TempoOutOfBounds, + /// Activity-cutoff factor out of `[MinActivityCutoffFactorMilli, MaxActivityCutoffFactorMilli]` bounds. + ActivityCutoffFactorMilliOutOfBounds, + /// `trigger_epoch` called while a previously triggered epoch is still pending. + EpochTriggerAlreadyPending, } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index cdb37bb0dd..e6209ffa18 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -608,5 +608,37 @@ mod events { /// The subnet the lock is on. netuid: NetUid, }, + + /// Activity-cutoff factor (per-mille) set on a subnet by its owner. + ActivityCutoffFactorMilliSet(NetUid, u32), + + /// Owner manually triggered an epoch for their subnet. + EpochTriggered { + /// The subnet identifier. + netuid: NetUid, + /// The account that triggered the epoch. + by: T::AccountId, + /// The earliest block at which the triggered epoch may execute. + fires_at: u64, + }, + + /// An epoch slot was deferred to the next block due to the per-block epoch cap. + EpochDeferred { + /// The subnet identifier. 
+ netuid: NetUid, + /// Block at which the epoch was originally scheduled. + from_block: u64, + /// Block to which the epoch was deferred. + to_block: u64, + }, + + /// `should_run_epoch` returned true but `is_epoch_input_state_consistent` returned false; + /// schedule advanced, epoch execution skipped. + EpochSkippedDueToInconsistentState { + /// The subnet identifier. + netuid: NetUid, + /// The block at which the slot was consumed. + block: u64, + }, } } diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index 6aa949ae45..94bca7e90b 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -172,7 +172,9 @@ mod hooks { // Fix RootClaimed overclaim caused by single-subnet hotkey swap bug .saturating_add(migrations::migrate_fix_root_claimed_overclaim::migrate_fix_root_claimed_overclaim::()) // Mint missing SubnetTAO and SubnetLocked into subnet accounts to make TotalIssuance match in balances and subtensor - .saturating_add(migrations::migrate_subnet_balances::migrate_subnet_balances::()); + .saturating_add(migrations::migrate_subnet_balances::migrate_subnet_balances::()) + // Seed LastEpochBlock for dynamic-tempo / owner-triggered-epochs feature + .saturating_add(migrations::migrate_dynamic_tempo::migrate_dynamic_tempo::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_dynamic_tempo.rs b/pallets/subtensor/src/migrations/migrate_dynamic_tempo.rs new file mode 100644 index 0000000000..0eba21cb24 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_dynamic_tempo.rs @@ -0,0 +1,119 @@ +use super::*; +use frame_support::{traits::Get, weights::Weight}; +use log; +use scale_info::prelude::string::String; + +/// One-shot migration for the dynamic-tempo / owner-triggered-epochs feature. +/// +/// 1. 
Back-fills `LastEpochBlock[netuid]` for every existing subnet so the first +/// post-upgrade epoch lands on the same block as the legacy modulo formula +/// `(block + netuid + 1) % (tempo + 1) == 0`. The new scheduler period is +/// `tempo + 1` (next firing at `LastEpochBlock + tempo + 1`). +/// 2. Defensively clamps `Tempo` values in `(0, MIN_TEMPO) ∪ (MAX_TEMPO, u16::MAX]` +/// into `[MIN_TEMPO, MAX_TEMPO]`. Subnets with `Tempo == 0` are left as-is — the +/// legacy short-circuit keeps them dormant and matches their pre-upgrade behaviour. +/// 3. Converts each subnet's existing `ActivityCutoff[netuid]` (absolute block count) +/// into `ActivityCutoffFactorMilli[netuid]` (per-mille of `tempo`) so that +/// `factor * tempo / 1000 ≈ old_cutoff` post-upgrade. Production defaults +/// (`tempo=360`, `cutoff=5000`) round-trip to 4999 blocks (1-block delta from +/// integer division, ≈0.02%). Out-of-range factors are clamped to +/// `[MIN_ACTIVITY_CUTOFF_FACTOR_MILLI, MAX_ACTIVITY_CUTOFF_FACTOR_MILLI]` — +/// extreme historical cutoffs may shift to the nearest representable factor. 
+pub fn migrate_dynamic_tempo() -> Weight { + let mig_name: Vec = b"dynamic_tempo_v1".to_vec(); + let mig_name_str = String::from_utf8_lossy(&mig_name); + + let mut total_weight = T::DbWeight::get().reads(1); + + if HasMigrationRun::::get(&mig_name) { + log::info!("Migration '{mig_name_str}' already executed - skipping"); + return total_weight; + } + + log::info!("Running migration '{mig_name_str}'"); + + let current_block = Pallet::::get_current_block_as_u64(); + let mut visited: u64 = 0; + let mut tempo_clamped: u64 = 0; + let mut last_epoch_seeded: u64 = 0; + let mut activity_factor_seeded: u64 = 0; + let mut activity_factor_clamped: u64 = 0; + let mut reads: u64 = 0; + let mut writes: u64 = 0; + + let netuids: Vec = Tempo::::iter_keys().collect(); + reads = reads.saturating_add(netuids.len() as u64); + + for netuid in netuids.into_iter() { + visited = visited.saturating_add(1); + let mut tempo = Tempo::::get(netuid); + reads = reads.saturating_add(1); + + if tempo == 0 { + // Legacy `tempo == 0` short-circuit preserved; do not seed `LastEpochBlock`. + continue; + } + + // Defensive bounds clamp. + let clamped = tempo.clamp(MIN_TEMPO, MAX_TEMPO); + if clamped != tempo { + tempo = clamped; + Tempo::::insert(netuid, tempo); + tempo_clamped = tempo_clamped.saturating_add(1); + writes = writes.saturating_add(1); + } + + // Compute next-epoch block under the *legacy* modulo formula and back-fill + // `LastEpochBlock` so the *new* formula yields the same next-epoch block. + // Legacy `blocks_until_next_epoch`: + // adjusted = current_block + netuid + 1 + // remainder = adjusted % (tempo + 1) + // blocks_until_next = tempo - remainder + // New formula: next firing at `LastEpochBlock + tempo + 1`. 
Solve for `LastEpochBlock`: + // LastEpochBlock = current_block + blocks_until_next - tempo - 1 + // = current_block - (tempo + 1 - blocks_until_next) + let netuid_plus_one = (u16::from(netuid) as u64).saturating_add(1); + let tempo_plus_one = (tempo as u64).saturating_add(1); + let adjusted = current_block.wrapping_add(netuid_plus_one); + let remainder = adjusted.checked_rem(tempo_plus_one).unwrap_or(0); + let blocks_until_next = (tempo as u64).saturating_sub(remainder); + let offset = tempo_plus_one.saturating_sub(blocks_until_next); + let last_epoch = current_block.saturating_sub(offset); + + LastEpochBlock::::insert(netuid, last_epoch); + last_epoch_seeded = last_epoch_seeded.saturating_add(1); + writes = writes.saturating_add(1); + + // Convert legacy absolute `ActivityCutoff` into per-mille `ActivityCutoffFactorMilli` + let old_cutoff = ActivityCutoff::::get(netuid) as u64; + reads = reads.saturating_add(1); + let tempo_u64 = tempo as u64; + let raw_factor = old_cutoff + .saturating_mul(1_000) + .saturating_add(tempo_u64.saturating_sub(1)) + .checked_div(tempo_u64) + .unwrap_or(INITIAL_ACTIVITY_CUTOFF_FACTOR_MILLI as u64); + let clamped = raw_factor + .max(MIN_ACTIVITY_CUTOFF_FACTOR_MILLI as u64) + .min(MAX_ACTIVITY_CUTOFF_FACTOR_MILLI as u64) as u32; + if clamped as u64 != raw_factor { + activity_factor_clamped = activity_factor_clamped.saturating_add(1); + } + ActivityCutoffFactorMilli::::insert(netuid, clamped); + activity_factor_seeded = activity_factor_seeded.saturating_add(1); + writes = writes.saturating_add(1); + } + + total_weight = total_weight.saturating_add(T::DbWeight::get().reads_writes(reads, writes)); + + log::info!( + "Dynamic tempo migration: visited={visited}, tempo_clamped={tempo_clamped}, last_epoch_seeded={last_epoch_seeded}, activity_factor_seeded={activity_factor_seeded}, activity_factor_clamped={activity_factor_clamped}" + ); + + HasMigrationRun::::insert(&mig_name, true); + total_weight = 
total_weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!("Migration '{mig_name_str}' completed"); + + total_weight +} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index 9974fd0175..99aa9bfe41 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -5,6 +5,7 @@ use sp_io::KillStorageResult; use sp_io::hashing::twox_128; use sp_io::storage::clear_prefix; pub mod migrate_auto_stake_destination; +pub mod migrate_dynamic_tempo; pub mod migrate_clear_deprecated_registration_maps; pub mod migrate_coldkey_swap_scheduled; pub mod migrate_coldkey_swap_scheduled_to_announcements; diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 0d439c21f1..67c2254eb4 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -307,6 +307,12 @@ impl Pallet { // --- 3. Fill tempo memory item. Tempo::::insert(netuid, tempo); + // --- 3.1. Initialise `LastEpochBlock` with a per-netuid stagger + let now = Self::get_current_block_as_u64(); + let period = (tempo as u64).saturating_add(1).max(1); + let stagger = (u16::from(netuid) as u64) % period; + LastEpochBlock::::insert(netuid, now.saturating_sub(stagger)); + // --- 4. Increase total network count. TotalNetworks::::mutate(|n| *n = n.saturating_add(1)); diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index f6b24db36b..aa03a4cd63 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -54,12 +54,17 @@ impl Pallet { /// Returns true if the current block is within the terminal freeze window of the tempo for the /// given subnet. During this window, admin ops are prohibited to avoid interference with - /// validator weight submissions. + /// validator weight submissions. 
Engages immediately on a pending manual trigger (so the trigger + /// arms the freeze for the entire countdown to `PendingEpochAt`). pub fn is_in_admin_freeze_window(netuid: NetUid, current_block: u64) -> bool { let tempo = Self::get_tempo(netuid); if tempo == 0 { return false; } + let pending = PendingEpochAt::::get(netuid); + if pending > 0 && pending > current_block { + return true; + } let remaining = Self::blocks_until_next_epoch(netuid, tempo, current_block); let window = AdminFreezeWindow::::get() as u64; remaining < window @@ -102,7 +107,11 @@ impl Pallet { // ======================== // ==== Global Setters ==== // ======================== - pub fn set_tempo(netuid: NetUid, tempo: u16) { + /// Unchecked tempo write used by tests, precompiles, and internal helpers. + /// Does NOT reset `LastEpochBlock` — that is the responsibility of the owner-side + /// `set_tempo` extrinsic and `sudo_set_tempo` (root), both of which perform the cycle + /// reset explicitly. + pub fn set_tempo_unchecked(netuid: NetUid, tempo: u16) { Tempo::::insert(netuid, tempo); Self::deposit_event(Event::TempoSet(netuid, tempo)); } @@ -572,6 +581,27 @@ impl Pallet { Self::deposit_event(Event::ActivityCutoffSet(netuid, activity_cutoff)); } + /// Effective activity cutoff in blocks, derived from `ActivityCutoffFactorMilli` and `Tempo`. + /// `cutoff_blocks = (factor × tempo) / 1000`, clamped to ≥ 1. 
+ pub fn get_activity_cutoff_blocks(netuid: NetUid) -> u64 { + let factor_milli = ActivityCutoffFactorMilli::::get(netuid) as u64; + let tempo = Self::get_tempo(netuid) as u64; + factor_milli + .saturating_mul(tempo) + .checked_div(1000) + .unwrap_or(0) + .max(1) + } + + pub fn get_activity_cutoff_factor_milli(netuid: NetUid) -> u32 { + ActivityCutoffFactorMilli::::get(netuid) + } + + pub fn set_activity_cutoff_factor_milli(netuid: NetUid, factor_milli: u32) { + ActivityCutoffFactorMilli::::insert(netuid, factor_milli); + Self::deposit_event(Event::ActivityCutoffFactorMilliSet(netuid, factor_milli)); + } + // Registration Toggle utils pub fn get_network_registration_allowed(netuid: NetUid) -> bool { NetworkRegistrationAllowed::::get(netuid) diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index f0c9243aa8..c662baaf63 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -17,6 +17,7 @@ pub enum TransactionType { MechanismEmission, MaxUidsTrimming, AddStakeBurn, + TempoUpdate, } impl TransactionType { @@ -46,6 +47,7 @@ impl TransactionType { } Self::SetSNOwnerHotkey => DefaultSetSNOwnerHotkeyRateLimit::::get(), Self::AddStakeBurn => Tempo::::get(netuid) as u64, + Self::TempoUpdate => MIN_TEMPO as u64, _ => self.rate_limit::(), } @@ -144,6 +146,7 @@ impl From for u16 { TransactionType::MechanismEmission => 8, TransactionType::MaxUidsTrimming => 9, TransactionType::AddStakeBurn => 10, + TransactionType::TempoUpdate => 11, } } } @@ -162,6 +165,7 @@ impl From for TransactionType { 8 => TransactionType::MechanismEmission, 9 => TransactionType::MaxUidsTrimming, 10 => TransactionType::AddStakeBurn, + 11 => TransactionType::TempoUpdate, _ => TransactionType::Unknown, } } @@ -204,6 +208,8 @@ pub enum Hyperparameter { MaxAllowedUids = 25, BurnHalfLife = 26, BurnIncreaseMult = 27, + ActivityCutoffFactorMilli = 28, + TriggerEpoch = 29, } impl Pallet { diff --git 
a/runtime/src/lib.rs b/runtime/src/lib.rs index e20b11a5aa..5db49e3a58 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -272,7 +272,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 403, + spec_version: 404, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 42b3e29d4749cde748a79fbe09940eb0e6c665b9 Mon Sep 17 00:00:00 2001 From: Evgeny Svirsky Date: Wed, 6 May 2026 13:35:47 +0200 Subject: [PATCH 02/11] clippy + fmt --- pallets/subtensor/src/coinbase/run_coinbase.rs | 4 +--- pallets/subtensor/src/coinbase/tempo_control.rs | 11 ++--------- pallets/subtensor/src/lib.rs | 3 ++- pallets/subtensor/src/macros/dispatches.rs | 6 +----- pallets/subtensor/src/migrations/mod.rs | 2 +- pallets/subtensor/src/subnets/subnet.rs | 2 +- 6 files changed, 8 insertions(+), 20 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 62a31a99c7..db34ea42de 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -1054,9 +1054,7 @@ impl Pallet { } let last = LastEpochBlock::::get(netuid); // Period is `tempo + 1`: next firing at `last + tempo + 1`. 
- let next_auto = last - .saturating_add(tempo as u64) - .saturating_add(1); + let next_auto = last.saturating_add(tempo as u64).saturating_add(1); next_auto.saturating_sub(block_number) } } diff --git a/pallets/subtensor/src/coinbase/tempo_control.rs b/pallets/subtensor/src/coinbase/tempo_control.rs index 9b7624a233..e81f99ea42 100644 --- a/pallets/subtensor/src/coinbase/tempo_control.rs +++ b/pallets/subtensor/src/coinbase/tempo_control.rs @@ -9,11 +9,7 @@ use crate::utils::rate_limiting::{Hyperparameter, TransactionType}; impl Pallet { /// Owner-side `set_tempo` implementation. See spec §5.1. - pub fn do_set_tempo( - origin: OriginFor, - netuid: NetUid, - tempo: u16, - ) -> DispatchResult { + pub fn do_set_tempo(origin: OriginFor, netuid: NetUid, tempo: u16) -> DispatchResult { let who = Self::ensure_subnet_owner(origin, netuid)?; ensure!( @@ -74,10 +70,7 @@ impl Pallet { /// Owner-side `trigger_epoch` implementation. See spec §5.3. /// Schedules the triggered epoch to fire after `AdminFreezeWindow` blocks; that /// countdown engages the freeze window for the subnet via `is_in_admin_freeze_window`. - pub fn do_trigger_epoch( - origin: OriginFor, - netuid: NetUid, - ) -> Result<(), DispatchError> { + pub fn do_trigger_epoch(origin: OriginFor, netuid: NetUid) -> Result<(), DispatchError> { let who = Self::ensure_subnet_owner(origin, netuid)?; // No `ensure_admin_window_open` here: trigger *defines* the next epoch. diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 6eda0000f7..b9df4fb8ef 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1756,7 +1756,8 @@ pub mod pallet { /// Advances on every `should_run_epoch == true` slot — including consistency-skipped slots — /// and on a successful `set_tempo` (cycle reset). 
#[pallet::storage] - pub type LastEpochBlock = StorageMap<_, Identity, NetUid, u64, ValueQuery, DefaultZeroU64>; + pub type LastEpochBlock = + StorageMap<_, Identity, NetUid, u64, ValueQuery, DefaultZeroU64>; /// --- MAP ( netuid ) --> block at which a manually triggered epoch should fire. /// `0` means no trigger pending. Cleared after the triggered epoch runs. diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 668ce70b68..f2228e3b99 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -2602,11 +2602,7 @@ mod dispatches { #[pallet::weight(Weight::from_parts(20_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)))] // TODO: add benchmarks and update weights - pub fn set_tempo( - origin: OriginFor, - netuid: NetUid, - tempo: u16, - ) -> DispatchResult { + pub fn set_tempo(origin: OriginFor, netuid: NetUid, tempo: u16) -> DispatchResult { Self::do_set_tempo(origin, netuid, tempo) } diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index 99aa9bfe41..d8202f3546 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -5,7 +5,6 @@ use sp_io::KillStorageResult; use sp_io::hashing::twox_128; use sp_io::storage::clear_prefix; pub mod migrate_auto_stake_destination; -pub mod migrate_dynamic_tempo; pub mod migrate_clear_deprecated_registration_maps; pub mod migrate_coldkey_swap_scheduled; pub mod migrate_coldkey_swap_scheduled_to_announcements; @@ -17,6 +16,7 @@ pub mod migrate_crv3_v2_to_timelocked; pub mod migrate_delete_subnet_21; pub mod migrate_delete_subnet_3; pub mod migrate_disable_commit_reveal; +pub mod migrate_dynamic_tempo; pub mod migrate_fix_bad_hk_swap; pub mod migrate_fix_childkeys; pub mod migrate_fix_is_network_member; diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs 
index 67c2254eb4..60f83ff8f1 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -310,7 +310,7 @@ impl Pallet { // --- 3.1. Initialise `LastEpochBlock` with a per-netuid stagger let now = Self::get_current_block_as_u64(); let period = (tempo as u64).saturating_add(1).max(1); - let stagger = (u16::from(netuid) as u64) % period; + let stagger = (u16::from(netuid) as u64).checked_rem(period).unwrap_or(0); LastEpochBlock::::insert(netuid, now.saturating_sub(stagger)); // --- 4. Increase total network count. From c6567e2de4ff5116997bbe3e08d24ba60f7de4c1 Mon Sep 17 00:00:00 2001 From: Evgeny Svirsky Date: Wed, 6 May 2026 14:04:00 +0200 Subject: [PATCH 03/11] clean up --- pallets/subtensor/src/coinbase/tempo_control.rs | 6 +++--- pallets/subtensor/src/epoch/run_epoch.rs | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pallets/subtensor/src/coinbase/tempo_control.rs b/pallets/subtensor/src/coinbase/tempo_control.rs index e81f99ea42..9e694854db 100644 --- a/pallets/subtensor/src/coinbase/tempo_control.rs +++ b/pallets/subtensor/src/coinbase/tempo_control.rs @@ -8,7 +8,7 @@ use crate::system::pallet_prelude::OriginFor; use crate::utils::rate_limiting::{Hyperparameter, TransactionType}; impl Pallet { - /// Owner-side `set_tempo` implementation. See spec §5.1. + /// Owner-side `set_tempo` implementation. pub fn do_set_tempo(origin: OriginFor, netuid: NetUid, tempo: u16) -> DispatchResult { let who = Self::ensure_subnet_owner(origin, netuid)?; @@ -37,7 +37,7 @@ impl Pallet { Ok(()) } - /// Owner-side `set_activity_cutoff_factor` implementation. See spec §5.2. + /// Owner-side `set_activity_cutoff_factor` implementation. pub fn do_set_activity_cutoff_factor( origin: OriginFor, netuid: NetUid, @@ -67,7 +67,7 @@ impl Pallet { Ok(()) } - /// Owner-side `trigger_epoch` implementation. See spec §5.3. + /// Owner-side `trigger_epoch` implementation. 
/// Schedules the triggered epoch to fire after `AdminFreezeWindow` blocks; that /// countdown engages the freeze window for the subnet via `is_in_admin_freeze_window`. pub fn do_trigger_epoch(origin: OriginFor, netuid: NetUid) -> Result<(), DispatchError> { diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 6aa7b2307e..ec668c1eb9 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -205,7 +205,7 @@ impl Pallet { // Recently registered matrix, recently_ij=True if last_tempo was *before* j was last registered. // Mask if: the last tempo block happened *before* the registration block // ==> last_tempo <= registered - // For dynamic tempo - we pick previous-successful-epoch block: `LastMechansimStepBlock + 1`) + // For dynamic tempo - we pick previous-successful-epoch block: `LastMechansimStepBlock + 1` let lms = LastMechansimStepBlock::::get(netuid); let last_tempo: u64 = if lms == 0 { current_block.saturating_sub(tempo) @@ -825,7 +825,7 @@ impl Pallet { // Remove bonds referring to neurons that have registered since last tempo. // Mask if: the last tempo block happened *before* the registration block // ==> last_tempo <= registered - // For dynamic tempo - we pick previous-successful-epoch block: `LastMechansimStepBlock + 1`) + // For dynamic tempo - we pick previous-successful-epoch block: `LastMechansimStepBlock + 1` let lms = LastMechansimStepBlock::::get(netuid); let last_tempo: u64 = if lms == 0 { current_block.saturating_sub(tempo) @@ -871,7 +871,7 @@ impl Pallet { // Remove bonds referring to neurons that have registered since last tempo. 
// Mask if: the last tempo block happened *before* the registration block // ==> last_tempo <= registered - // For dynamic tempo - we pick previous-successful-epoch block: `LastMechansimStepBlock + 1`) + // For dynamic tempo - we pick previous-successful-epoch block: `LastMechansimStepBlock + 1` let lms = LastMechansimStepBlock::::get(netuid); let last_tempo: u64 = if lms == 0 { current_block.saturating_sub(tempo) From ad7ba80dfd90a07636b6375cd664ec40eebb2a48 Mon Sep 17 00:00:00 2001 From: Evgeny Svirsky Date: Thu, 7 May 2026 11:29:34 +0200 Subject: [PATCH 04/11] Renamed function + updated comment --- pallets/subtensor/src/coinbase/run_coinbase.rs | 9 ++++++--- pallets/subtensor/src/utils/misc.rs | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index db34ea42de..9c5e043ed3 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -1045,10 +1045,13 @@ impl Pallet { } /// Returns the number of blocks remaining before the next automatic epoch under the - /// stateful scheduler (period `tempo + 1`, anchored on `LastEpochBlock`). Used by the - /// admin-freeze-window predicate and external tooling. Returns `u64::MAX` when + /// stateful scheduler (period `tempo + 1`, anchored on `LastEpochBlock`). Does NOT account for: + /// - `PendingEpochAt` (owner-triggered manual fire — could happen sooner), + /// - `BlocksSinceLastStep > MAX_TEMPO` safety-net, + /// - per-block-cap defer (could push the actual fire one or more blocks later) + /// Used by the admin-freeze-window predicate and external tooling. Returns `u64::MAX` when /// `tempo == 0` (legacy defensive short-circuit). 
- pub fn blocks_until_next_epoch(netuid: NetUid, tempo: u16, block_number: u64) -> u64 { + pub fn blocks_until_next_auto_epoch(netuid: NetUid, tempo: u16, block_number: u64) -> u64 { if tempo == 0 { return u64::MAX; } diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index aa03a4cd63..7861f06929 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -65,7 +65,7 @@ impl Pallet { if pending > 0 && pending > current_block { return true; } - let remaining = Self::blocks_until_next_epoch(netuid, tempo, current_block); + let remaining = Self::blocks_until_next_auto_epoch(netuid, tempo, current_block); let window = AdminFreezeWindow::::get() as u64; remaining < window } From df184e33c7294b47e2c1f34581c57150953092f1 Mon Sep 17 00:00:00 2001 From: Evgeny Svirsky Date: Thu, 7 May 2026 12:00:22 +0200 Subject: [PATCH 05/11] wrap set tempo + cycle reset with a helper function --- pallets/admin-utils/src/lib.rs | 5 +---- pallets/subtensor/src/coinbase/tempo_control.rs | 6 +----- pallets/subtensor/src/utils/misc.rs | 9 +++++++++ 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 1990fe8968..64178e8838 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -975,10 +975,7 @@ pub mod pallet { pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - pallet_subtensor::Pallet::::set_tempo_unchecked(netuid, tempo); - // Cycle reset on every successful set_tempo - let now = pallet_subtensor::Pallet::::get_current_block_as_u64(); - pallet_subtensor::LastEpochBlock::::insert(netuid, now); + pallet_subtensor::Pallet::::apply_tempo_with_cycle_reset(netuid, tempo); log::debug!("TempoSet( netuid: {netuid:?} tempo: {tempo:?} ) "); Ok(()) } diff --git a/pallets/subtensor/src/coinbase/tempo_control.rs b/pallets/subtensor/src/coinbase/tempo_control.rs index 9e694854db..6e3f325d41 
100644 --- a/pallets/subtensor/src/coinbase/tempo_control.rs +++ b/pallets/subtensor/src/coinbase/tempo_control.rs @@ -27,13 +27,9 @@ impl Pallet { let now = Self::get_current_block_as_u64(); - Tempo::::insert(netuid, tempo); - // Cycle reset on every successful set_tempo - LastEpochBlock::::insert(netuid, now); + Self::apply_tempo_with_cycle_reset(netuid, tempo); tx.set_last_block_on_subnet::(&who, netuid, now); - - Self::deposit_event(Event::TempoSet(netuid, tempo)); Ok(()) } diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 7861f06929..8aeaa90d9f 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -115,6 +115,15 @@ impl Pallet { Tempo::::insert(netuid, tempo); Self::deposit_event(Event::TempoSet(netuid, tempo)); } + + /// Sets `Tempo` and resets the state-based scheduler anchor `LastEpochBlock` + /// to the current block + pub fn apply_tempo_with_cycle_reset(netuid: NetUid, tempo: u16) { + Self::set_tempo_unchecked(netuid, tempo); + let now = Self::get_current_block_as_u64(); + LastEpochBlock::::insert(netuid, now); + } + pub fn set_last_adjustment_block(netuid: NetUid, last_adjustment_block: u64) { LastAdjustmentBlock::::insert(netuid, last_adjustment_block); } From b2e4658f914fd530332fc649d57ba2b3e9d30da0 Mon Sep 17 00:00:00 2001 From: Evgeny Svirsky Date: Thu, 7 May 2026 16:18:04 +0200 Subject: [PATCH 06/11] tests --- eco-tests/src/helpers.rs | 2 +- pallets/admin-utils/src/tests/mod.rs | 16 ++-- pallets/subtensor/src/tests/children.rs | 3 + pallets/subtensor/src/tests/claim_root.rs | 7 ++ pallets/subtensor/src/tests/coinbase.rs | 37 +++++---- pallets/subtensor/src/tests/emission.rs | 91 ++++++++++++++++------- pallets/subtensor/src/tests/ensure.rs | 21 ++++-- pallets/subtensor/src/tests/epoch.rs | 12 +-- pallets/subtensor/src/tests/mock.rs | 18 ++--- pallets/subtensor/src/tests/weights.rs | 40 ++++++---- precompiles/src/neuron.rs | 2 +- 11 files changed, 164 insertions(+), 85 
deletions(-) diff --git a/eco-tests/src/helpers.rs b/eco-tests/src/helpers.rs index c6fa0ec72d..c306ffc96f 100644 --- a/eco-tests/src/helpers.rs +++ b/eco-tests/src/helpers.rs @@ -106,7 +106,7 @@ pub fn run_to_block_no_epoch(netuid: NetUid, n: u64) { pub fn step_epochs(count: u16, netuid: NetUid) { for _ in 0..count { - let blocks_to_next_epoch = SubtensorModule::blocks_until_next_epoch( + let blocks_to_next_epoch = SubtensorModule::blocks_until_next_auto_epoch( netuid, SubtensorModule::get_tempo(netuid), SubtensorModule::get_current_block_as_u64(), diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index c94e1e96e8..7b28522aa9 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -1981,7 +1981,7 @@ fn test_sudo_set_admin_freeze_window_and_rate() { fn test_freeze_window_blocks_root_and_owner() { new_test_ext().execute_with(|| { let netuid = NetUid::from(1); - let tempo = 10; + let tempo: u16 = 10; // Create subnet with tempo 10 add_network(netuid, tempo); // Set freeze window to 3 blocks @@ -1989,8 +1989,12 @@ fn test_freeze_window_blocks_root_and_owner() { <::RuntimeOrigin>::root(), 3 )); - // Advance to a block where remaining < 3 - run_to_block((tempo - 2).into()); + // Pin the state-based scheduler so the next auto-epoch lands at + // `tempo + 1`. Freeze window covers blocks (next_auto - 3, next_auto]. + pallet_subtensor::LastEpochBlock::::insert(netuid, 0); + let next_auto = (tempo as u64).saturating_add(1); + // Advance to a block inside the freeze window (remaining < 3). 
+ run_to_block(next_auto - 2); // Root should be blocked during freeze window assert_noop!( @@ -2086,7 +2090,7 @@ fn test_owner_hyperparam_update_rate_limit_enforced() { SubnetOwner::::insert(netuid, owner); // Set tempo to 1 so owner hyperparam RL = 2 tempos = 2 blocks - SubtensorModule::set_tempo(netuid, 1); + SubtensorModule::set_tempo_unchecked(netuid, 1); // Disable admin freeze window to avoid blocking on small tempo assert_ok!(AdminUtils::sudo_set_admin_freeze_window( <::RuntimeOrigin>::root(), @@ -2141,7 +2145,7 @@ fn test_hyperparam_rate_limit_enforced_by_tempo() { SubnetOwner::::insert(netuid, owner); // Set tempo to 1 so RL = 2 blocks - SubtensorModule::set_tempo(netuid, 1); + SubtensorModule::set_tempo_unchecked(netuid, 1); // Disable admin freeze window to avoid blocking on small tempo assert_ok!(AdminUtils::sudo_set_admin_freeze_window( <::RuntimeOrigin>::root(), @@ -2189,7 +2193,7 @@ fn test_owner_hyperparam_rate_limit_independent_per_param() { SubnetOwner::::insert(netuid, owner); // Use small tempo to make RL short and deterministic (2 blocks when tempo=1) - SubtensorModule::set_tempo(netuid, 1); + SubtensorModule::set_tempo_unchecked(netuid, 1); // Disable admin freeze window so it doesn't interfere with small tempo assert_ok!(AdminUtils::sudo_set_admin_freeze_window( <::RuntimeOrigin>::root(), diff --git a/pallets/subtensor/src/tests/children.rs b/pallets/subtensor/src/tests/children.rs index a7d4b1b273..0fad2dc4c8 100644 --- a/pallets/subtensor/src/tests/children.rs +++ b/pallets/subtensor/src/tests/children.rs @@ -3098,6 +3098,9 @@ fn test_parent_child_chain_emission() { PendingValidatorEmission::::insert(netuid, AlphaBalance::ZERO); PendingServerEmission::::insert(netuid, AlphaBalance::ZERO); + // To trigger the epoch, block should be > tempo. 
So we advance it before + System::set_block_number(2); + // Run epoch with emission value let emission_value = u64::from(emission.peek()); SubtensorModule::run_coinbase(emission); diff --git a/pallets/subtensor/src/tests/claim_root.rs b/pallets/subtensor/src/tests/claim_root.rs index bd5761f376..f8ce465ea1 100644 --- a/pallets/subtensor/src/tests/claim_root.rs +++ b/pallets/subtensor/src/tests/claim_root.rs @@ -806,6 +806,9 @@ fn test_claim_root_with_run_coinbase() { .into(); assert_eq!(initial_stake, 0u64); + // To trigger the epoch, block should be > tempo. So we advance it before + System::set_block_number(2); + let block_emissions = SubtensorModule::mint_tao(1_000_000u64.into()); SubtensorModule::run_coinbase(block_emissions); @@ -992,6 +995,7 @@ fn test_populate_staking_maps() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::claim_root::test_claim_root_coinbase_distribution --exact --show-output #[test] fn test_claim_root_coinbase_distribution() { new_test_ext(1).execute_with(|| { @@ -1001,6 +1005,9 @@ fn test_claim_root_coinbase_distribution() { let netuid = add_dynamic_network(&hotkey, &owner_coldkey); Tempo::::insert(netuid, 1); + // Re-anchor the state-based scheduler at the current block + // The 2nd step will fire the tempo + crate::LastEpochBlock::::insert(netuid, SubtensorModule::get_current_block_as_u64()); SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 let root_stake = 200_000_000u64; diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index 6199aa9952..8635e6b320 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -654,7 +654,7 @@ fn test_owner_cut_base() { 1_000_000_000_000_u64.into(), 1_000_000_000_000_u64.into(), ); - SubtensorModule::set_tempo(netuid, 10000); // Large number (dont drain) + SubtensorModule::set_tempo_unchecked(netuid, 10000); // Large number (dont drain) 
SubtensorModule::set_subnet_owner_cut(0); SubtensorModule::run_coinbase(SubtensorModule::mint_tao(0.into())); assert_eq!(PendingOwnerCut::::get(netuid), 0.into()); // No cut @@ -664,7 +664,7 @@ fn test_owner_cut_base() { }); } -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_pending_swapped --exact --show-output --nocapture +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_pending_emission --exact --show-output --nocapture #[test] fn test_pending_emission() { new_test_ext(1).execute_with(|| { @@ -676,10 +676,13 @@ fn test_pending_emission() { FirstEmissionBlockNumber::::insert(netuid, 0); mock::setup_reserves(netuid, 1_000_000.into(), 1.into()); + LastEpochBlock::::insert(netuid, 0); + System::set_block_number(10); SubtensorModule::run_coinbase(SubtensorModule::mint_tao(0.into())); SubnetTAO::::insert(NetUid::ROOT, TaoBalance::from(1_000_000_000)); // Add root weight. + System::set_block_number(12); SubtensorModule::run_coinbase(SubtensorModule::mint_tao(0.into())); - SubtensorModule::set_tempo(netuid, 10000); // Large number (dont drain) + SubtensorModule::set_tempo_unchecked(netuid, 10000); // Large number (dont drain) SubtensorModule::set_tao_weight(u64::MAX); // Set TAO weight to 1.0 // Set moving price > 1.0 @@ -2456,7 +2459,7 @@ fn test_distribute_emission_zero_emission() { let miner_ck = U256::from(6); let init_stake: u64 = 100_000_000_000_000; let tempo = 2; - SubtensorModule::set_tempo(netuid, tempo); + SubtensorModule::set_tempo_unchecked(netuid, tempo); // Set weight-set limit to 0. SubtensorModule::set_weights_set_rate_limit(netuid, 0); @@ -2544,7 +2547,7 @@ fn test_run_coinbase_not_started() { let miner_ck = U256::from(6); let init_stake: u64 = 100_000_000_000_000; let tempo = 2; - SubtensorModule::set_tempo(netuid, tempo); + SubtensorModule::set_tempo_unchecked(netuid, tempo); // Set weight-set limit to 0. 
SubtensorModule::set_weights_set_rate_limit(netuid, 0); @@ -2639,7 +2642,7 @@ fn test_run_coinbase_not_started_start_after() { let miner_ck = U256::from(6); let init_stake: u64 = 100_000_000_000_000; let tempo = 2; - SubtensorModule::set_tempo(netuid, tempo); + SubtensorModule::set_tempo_unchecked(netuid, tempo); // Set weight-set limit to 0. SubtensorModule::set_weights_set_rate_limit(netuid, 0); @@ -2707,6 +2710,12 @@ fn test_run_coinbase_not_started_start_after() { Some(current_block + 1) ); + // Advance the block past `LastEpochBlock + tempo` so the state-based + // scheduler is due again (the previous `run_coinbase` advanced it). + next_block_no_epoch(netuid); + next_block_no_epoch(netuid); + next_block_no_epoch(netuid); + // Run coinbase with emission. let emission_credit = SubtensorModule::mint_tao(100_000_000.into()); SubtensorModule::run_coinbase(emission_credit); @@ -2970,6 +2979,7 @@ fn test_zero_shares_zero_emission() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_mining_emission_distribution_with_no_root_sell --exact --show-output --nocapture #[test] fn test_mining_emission_distribution_with_no_root_sell() { new_test_ext(1).execute_with(|| { @@ -3097,13 +3107,14 @@ fn test_mining_emission_distribution_with_no_root_sell() { AlphaBalance::ZERO, "Root alpha divs should be zero" ); + step_block(1); let miner_stake_before_epoch = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( &miner_hotkey, &miner_coldkey, netuid, ); // Run again but with some root stake - step_block(subnet_tempo - 2); + step_block(subnet_tempo); assert_abs_diff_eq!( PendingServerEmission::::get(netuid).to_u64(), U96F32::saturating_from_num(per_block_emission) @@ -3273,6 +3284,7 @@ fn test_mining_emission_distribution_with_root_sell() { // Run run_coinbase until emissions are drained step_block(subnet_tempo); + LastEpochBlock::::insert(netuid, SubtensorModule::get_current_block_as_u64()); let 
old_root_alpha_divs = PendingRootAlphaDivs::::get(netuid); let miner_stake_before_epoch = SubtensorModule::get_stake_for_hotkey_and_coldkey_on_subnet( &miner_hotkey, @@ -3582,8 +3594,8 @@ fn test_coinbase_drain_pending_resets_blockssincelaststep() { let zero = U96F32::saturating_from_num(0); let netuid0 = add_dynamic_network(&U256::from(1), &U256::from(2)); Tempo::::insert(netuid0, 100); - // Ensure the block number we use is the tempo block - let block_number = 98; + LastEpochBlock::::insert(netuid0, 0); + let block_number = 102; assert!(SubtensorModule::should_run_epoch(netuid0, block_number)); let blocks_since_last_step_before = 12345678; @@ -3595,8 +3607,7 @@ fn test_coinbase_drain_pending_resets_blockssincelaststep() { let blocks_since_last_step_after = BlocksSinceLastStep::::get(netuid0); assert_eq!(blocks_since_last_step_after, 0); - // Also check LastMechansimStepBlock is set to the block number we ran on - assert_eq!(LastMechansimStepBlock::::get(netuid0), block_number); + assert_eq!(LastMechansimStepBlock::::get(netuid0), 12345); }); } @@ -3606,8 +3617,8 @@ fn test_coinbase_drain_pending_gets_counters_and_resets_them() { let zero = U96F32::saturating_from_num(0); let netuid0 = add_dynamic_network(&U256::from(1), &U256::from(2)); Tempo::::insert(netuid0, 100); - // Ensure the block number we use is the tempo block - let block_number = 98; + LastEpochBlock::::insert(netuid0, 0); + let block_number = 102; assert!(SubtensorModule::should_run_epoch(netuid0, block_number)); let pending_server_em = AlphaBalance::from(123434534); diff --git a/pallets/subtensor/src/tests/emission.rs b/pallets/subtensor/src/tests/emission.rs index ecd2df544b..4eef1a97f2 100644 --- a/pallets/subtensor/src/tests/emission.rs +++ b/pallets/subtensor/src/tests/emission.rs @@ -1,6 +1,7 @@ use subtensor_runtime_common::NetUid; use super::mock::*; +use crate::LastEpochBlock; // 1. Test Zero Tempo // Description: Verify that when tempo is 0, the function returns u64::MAX. 
@@ -9,7 +10,7 @@ use super::mock::*; fn test_zero_tempo() { new_test_ext(1).execute_with(|| { assert_eq!( - SubtensorModule::blocks_until_next_epoch(1.into(), 0, 100), + SubtensorModule::blocks_until_next_auto_epoch(1.into(), 0, 100), u64::MAX ); }); @@ -21,14 +22,21 @@ fn test_zero_tempo() { #[test] fn test_regular_case() { new_test_ext(1).execute_with(|| { - assert_eq!(SubtensorModule::blocks_until_next_epoch(1.into(), 10, 5), 3); + LastEpochBlock::::insert(NetUid::from(1), 0); + LastEpochBlock::::insert(NetUid::from(2), 0); + LastEpochBlock::::insert(NetUid::from(3), 0); + // tempo + 1 - block. assert_eq!( - SubtensorModule::blocks_until_next_epoch(2.into(), 20, 15), - 2 + SubtensorModule::blocks_until_next_auto_epoch(1.into(), 10, 5), + 6 + ); + assert_eq!( + SubtensorModule::blocks_until_next_auto_epoch(2.into(), 20, 15), + 6 ); assert_eq!( - SubtensorModule::blocks_until_next_epoch(3.into(), 30, 25), - 1 + SubtensorModule::blocks_until_next_auto_epoch(3.into(), 30, 25), + 6 ); }); } @@ -39,13 +47,17 @@ fn test_regular_case() { #[test] fn test_boundary_conditions() { new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(u16::MAX); + LastEpochBlock::::insert(netuid, 0); + // Far past the next-auto block — saturating to 0. assert_eq!( - SubtensorModule::blocks_until_next_epoch(u16::MAX.into(), u16::MAX, u64::MAX), + SubtensorModule::blocks_until_next_auto_epoch(netuid, u16::MAX, u64::MAX), 0 ); + // Block 0 — full period until next auto epoch. 
assert_eq!( - SubtensorModule::blocks_until_next_epoch(u16::MAX.into(), u16::MAX, 0), - u16::MAX as u64 + SubtensorModule::blocks_until_next_auto_epoch(netuid, u16::MAX, 0), + (u16::MAX as u64).saturating_add(1) ); }); } @@ -56,9 +68,11 @@ fn test_boundary_conditions() { #[test] fn test_overflow_handling() { new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(u16::MAX); + LastEpochBlock::::insert(netuid, 0); assert_eq!( - SubtensorModule::blocks_until_next_epoch(u16::MAX.into(), u16::MAX, u64::MAX - 1), - 1 + SubtensorModule::blocks_until_next_auto_epoch(netuid, u16::MAX, u64::MAX - 1), + 0 ); }); } @@ -69,13 +83,17 @@ fn test_overflow_handling() { #[test] fn test_epoch_alignment() { new_test_ext(1).execute_with(|| { + LastEpochBlock::::insert(NetUid::from(1), 0); + LastEpochBlock::::insert(NetUid::from(2), 0); + // tempo + 1 - block_number. assert_eq!( - SubtensorModule::blocks_until_next_epoch(1.into(), 10, 9), - 10 + SubtensorModule::blocks_until_next_auto_epoch(1.into(), 10, 9), + 2 ); + // Block exactly at next-auto — returns 0. assert_eq!( - SubtensorModule::blocks_until_next_epoch(2.into(), 20, 21), - 17 + SubtensorModule::blocks_until_next_auto_epoch(2.into(), 20, 21), + 0 ); }); } @@ -86,9 +104,23 @@ fn test_epoch_alignment() { #[test] fn test_different_network_ids() { new_test_ext(1).execute_with(|| { - assert_eq!(SubtensorModule::blocks_until_next_epoch(1.into(), 10, 5), 3); - assert_eq!(SubtensorModule::blocks_until_next_epoch(2.into(), 10, 5), 2); - assert_eq!(SubtensorModule::blocks_until_next_epoch(3.into(), 10, 5), 1); + // Anchor each subnet identically — proves the new formula does NOT + // depend on `netuid` (only on the per-subnet `LastEpochBlock`). 
+ LastEpochBlock::::insert(NetUid::from(1), 0); + LastEpochBlock::::insert(NetUid::from(2), 0); + LastEpochBlock::::insert(NetUid::from(3), 0); + assert_eq!( + SubtensorModule::blocks_until_next_auto_epoch(1.into(), 10, 5), + 6 + ); + assert_eq!( + SubtensorModule::blocks_until_next_auto_epoch(2.into(), 10, 5), + 6 + ); + assert_eq!( + SubtensorModule::blocks_until_next_auto_epoch(3.into(), 10, 5), + 6 + ); }); } @@ -98,9 +130,11 @@ fn test_different_network_ids() { #[test] fn test_large_tempo_values() { new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + LastEpochBlock::::insert(netuid, 0); assert_eq!( - SubtensorModule::blocks_until_next_epoch(1.into(), u16::MAX - 1, 100), - u16::MAX as u64 - 103 + SubtensorModule::blocks_until_next_auto_epoch(netuid, u16::MAX - 1, 100), + (u16::MAX as u64).saturating_sub(100) ); }); } @@ -113,9 +147,11 @@ fn test_consecutive_blocks() { new_test_ext(1).execute_with(|| { let tempo = 10; let netuid = NetUid::from(1); - let mut last_result = SubtensorModule::blocks_until_next_epoch(netuid, tempo, 0); + LastEpochBlock::::insert(netuid, 0); + let mut last_result = SubtensorModule::blocks_until_next_auto_epoch(netuid, tempo, 0); for i in 1..tempo - 1 { - let current_result = SubtensorModule::blocks_until_next_epoch(netuid, tempo, i as u64); + let current_result = + SubtensorModule::blocks_until_next_auto_epoch(netuid, tempo, i as u64); assert_eq!(current_result, last_result - 1); last_result = current_result; } @@ -128,13 +164,16 @@ fn test_consecutive_blocks() { #[test] fn test_wrap_around_behavior() { new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + LastEpochBlock::::insert(netuid, 0); + // `next_auto - block_number` saturates to 0 for far-future blocks. 
assert_eq!( - SubtensorModule::blocks_until_next_epoch(1.into(), 10, u64::MAX), - 9 + SubtensorModule::blocks_until_next_auto_epoch(netuid, 10, u64::MAX), + 0 ); assert_eq!( - SubtensorModule::blocks_until_next_epoch(1.into(), 10, u64::MAX - 1), - 10 + SubtensorModule::blocks_until_next_auto_epoch(netuid, 10, u64::MAX - 1), + 0 ); }); } diff --git a/pallets/subtensor/src/tests/ensure.rs b/pallets/subtensor/src/tests/ensure.rs index 1253285306..238eb99707 100644 --- a/pallets/subtensor/src/tests/ensure.rs +++ b/pallets/subtensor/src/tests/ensure.rs @@ -66,16 +66,21 @@ fn ensure_subnet_owner_or_root_distinguishes_root_and_owner() { fn ensure_admin_window_open_blocks_in_freeze_window() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(0); - let tempo = 10; - add_network(netuid, 10, 0); + let tempo: u16 = 10; + add_network(netuid, tempo, 0); - let freeze_window = 3; + let freeze_window: u16 = 3; crate::Pallet::::set_admin_freeze_window(freeze_window); - System::set_block_number((tempo - freeze_window).into()); + crate::LastEpochBlock::::insert(netuid, 0); + let next_auto = (tempo as u64).saturating_add(1); + + // Inside freeze window: `next_auto - freeze_window + 1`. + System::set_block_number(next_auto - freeze_window as u64 + 1); assert!(crate::Pallet::::ensure_admin_window_open(netuid).is_err()); - System::set_block_number((tempo - freeze_window - 1).into()); + // Outside freeze window: `next_auto - freeze_window`. 
+ System::set_block_number(next_auto - freeze_window as u64); assert!(crate::Pallet::::ensure_admin_window_open(netuid).is_ok()); }); } @@ -93,7 +98,7 @@ fn ensure_owner_or_root_with_limits_checks_rl_and_freeze() { crate::Pallet::::set_admin_freeze_window(0); // Set tempo to 1 so owner hyperparam RL = 2 blocks - crate::Pallet::::set_tempo(netuid, 1); + crate::Pallet::::set_tempo_unchecked(netuid, 1); assert_eq!(OwnerHyperparamRateLimit::::get(), 2); @@ -135,12 +140,12 @@ fn ensure_owner_or_root_with_limits_checks_rl_and_freeze() { // (using loop for clarity, because epoch calculation function uses netuid) // Restore tempo and configure freeze window for this part let freeze_window = 3; - crate::Pallet::::set_tempo(netuid, tempo); + crate::Pallet::::set_tempo_unchecked(netuid, tempo); crate::Pallet::::set_admin_freeze_window(freeze_window); let freeze_window = freeze_window as u64; loop { let cur = crate::Pallet::::get_current_block_as_u64(); - let rem = crate::Pallet::::blocks_until_next_epoch(netuid, tempo, cur); + let rem = crate::Pallet::::blocks_until_next_auto_epoch(netuid, tempo, cur); if rem < freeze_window { break; } diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index 02236d892d..9781a5a9c0 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -2052,14 +2052,14 @@ fn test_deregistered_miner_bonds() { } // Set tempo high so we don't automatically run epochs - SubtensorModule::set_tempo(netuid, high_tempo); + SubtensorModule::set_tempo_unchecked(netuid, high_tempo); // Run 2 blocks next_block(); next_block(); // set tempo to 2 blocks - SubtensorModule::set_tempo(netuid, 2); + SubtensorModule::set_tempo_unchecked(netuid, 2); // Run epoch if sparse { SubtensorModule::epoch(netuid, 1_000_000_000.into()); @@ -2077,7 +2077,7 @@ fn test_deregistered_miner_bonds() { assert!(bond_0_3 > 0); // Set tempo high so we don't automatically run epochs - SubtensorModule::set_tempo(netuid, 
high_tempo); + SubtensorModule::set_tempo_unchecked(netuid, high_tempo); // Run one more block next_block(); @@ -2137,7 +2137,7 @@ fn test_deregistered_miner_bonds() { ); // set tempo to 2 blocks - SubtensorModule::set_tempo(netuid, 2); + SubtensorModule::set_tempo_unchecked(netuid, 2); // Run epoch again. if sparse { SubtensorModule::epoch(netuid, 1_000_000_000.into()); @@ -2465,7 +2465,7 @@ fn test_blocks_since_last_step() { assert!(new_blocks > original_blocks); assert_eq!(new_blocks, 5); - let blocks_to_step: u16 = SubtensorModule::blocks_until_next_epoch( + let blocks_to_step: u16 = SubtensorModule::blocks_until_next_auto_epoch( netuid, tempo, SubtensorModule::get_current_block_as_u64(), @@ -2477,7 +2477,7 @@ fn test_blocks_since_last_step() { assert_eq!(post_blocks, 10); - let blocks_to_step: u16 = SubtensorModule::blocks_until_next_epoch( + let blocks_to_step: u16 = SubtensorModule::blocks_until_next_auto_epoch( netuid, tempo, SubtensorModule::get_current_block_as_u64(), diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 5ed7591eae..170f945b0c 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -690,9 +690,9 @@ pub(crate) fn next_block_no_epoch(netuid: NetUid) -> u64 { let high_tempo: u16 = u16::MAX - 1; let old_tempo: u16 = SubtensorModule::get_tempo(netuid); - SubtensorModule::set_tempo(netuid, high_tempo); + SubtensorModule::set_tempo_unchecked(netuid, high_tempo); let new_block = next_block(); - SubtensorModule::set_tempo(netuid, old_tempo); + SubtensorModule::set_tempo_unchecked(netuid, old_tempo); new_block } @@ -703,26 +703,24 @@ pub(crate) fn run_to_block_no_epoch(netuid: NetUid, n: u64) { let high_tempo: u16 = u16::MAX - 1; let old_tempo: u16 = SubtensorModule::get_tempo(netuid); - SubtensorModule::set_tempo(netuid, high_tempo); + SubtensorModule::set_tempo_unchecked(netuid, high_tempo); run_to_block(n); - SubtensorModule::set_tempo(netuid, old_tempo); + 
SubtensorModule::set_tempo_unchecked(netuid, old_tempo); } #[allow(dead_code)] pub(crate) fn step_epochs(count: u16, netuid: NetUid) { for _ in 0..count { - let blocks_to_next_epoch = SubtensorModule::blocks_until_next_epoch( + let blocks_to_next_epoch = SubtensorModule::blocks_until_next_auto_epoch( netuid, SubtensorModule::get_tempo(netuid), SubtensorModule::get_current_block_as_u64(), ); log::info!("Blocks to next epoch: {blocks_to_next_epoch:?}"); + // Step to the auto-epoch block — `on_initialize` at that block fires + // the epoch and advances `LastEpochBlock`, then move one block past + // it to mirror the legacy stepping cadence. step_block(blocks_to_next_epoch as u16); - - assert!(SubtensorModule::should_run_epoch( - netuid, - SubtensorModule::get_current_block_as_u64() - )); step_block(1); } } diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index 36cf17bfd8..c097976826 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -2230,7 +2230,7 @@ fn test_tempo_change_during_commit_reveal_process() { let tempo_before_next_reveal: u16 = 200; log::info!("Changing tempo to {tempo_before_next_reveal}"); - SubtensorModule::set_tempo(netuid, tempo_before_next_reveal); + SubtensorModule::set_tempo_unchecked(netuid, tempo_before_next_reveal); step_epochs(1, netuid); log::info!( @@ -2263,7 +2263,7 @@ fn test_tempo_change_during_commit_reveal_process() { let tempo: u16 = 150; log::info!("Changing tempo to {tempo}"); - SubtensorModule::set_tempo(netuid, tempo); + SubtensorModule::set_tempo_unchecked(netuid, tempo); step_epochs(1, netuid); log::info!( @@ -2286,7 +2286,7 @@ fn test_tempo_change_during_commit_reveal_process() { let tempo: u16 = 1050; log::info!("Changing tempo to {tempo}"); - SubtensorModule::set_tempo(netuid, tempo); + SubtensorModule::set_tempo_unchecked(netuid, tempo); assert_ok!(SubtensorModule::commit_weights( RuntimeOrigin::signed(hotkey), @@ -2300,7 +2300,7 @@ fn 
test_tempo_change_during_commit_reveal_process() { let tempo: u16 = 805; log::info!("Changing tempo to {tempo}"); - SubtensorModule::set_tempo(netuid, tempo); + SubtensorModule::set_tempo_unchecked(netuid, tempo); step_epochs(1, netuid); log::info!( @@ -3148,7 +3148,7 @@ fn test_tempo_and_reveal_period_change_during_commit_reveal_process() { // Step 2: Change tempo and reveal period after commit let new_tempo: u16 = 50; let new_reveal_period: u64 = 2; - SubtensorModule::set_tempo(netuid, new_tempo); + SubtensorModule::set_tempo_unchecked(netuid, new_tempo); assert_ok!(SubtensorModule::set_reveal_period(netuid, new_reveal_period)); log::info!( "Changed tempo to {new_tempo} and reveal period to {new_reveal_period}" @@ -3202,7 +3202,7 @@ fn test_tempo_and_reveal_period_change_during_commit_reveal_process() { // Step 4: Change tempo and reveal period again after reveal let new_tempo_after_reveal: u16 = 200; let new_reveal_period_after_reveal: u64 = 1; - SubtensorModule::set_tempo(netuid, new_tempo_after_reveal); + SubtensorModule::set_tempo_unchecked(netuid, new_tempo_after_reveal); assert_ok!(SubtensorModule::set_reveal_period( netuid, new_reveal_period_after_reveal @@ -4271,7 +4271,7 @@ fn test_highly_concurrent_commits_and_reveals_with_multiple_hotkeys() { } // ==== Modify Network Parameters During Commits ==== - SubtensorModule::set_tempo(netuid, 150); + SubtensorModule::set_tempo_unchecked(netuid, 150); assert_ok!(SubtensorModule::set_reveal_period(netuid, 7)); log::info!("Changed tempo to 150 and reveal_period to 7 during commits."); @@ -4317,7 +4317,7 @@ fn test_highly_concurrent_commits_and_reveals_with_multiple_hotkeys() { } // ==== Change Network Parameters Again ==== - SubtensorModule::set_tempo(netuid, 200); + SubtensorModule::set_tempo_unchecked(netuid, 200); assert_ok!(SubtensorModule::set_reveal_period(netuid, 10)); log::info!("Changed tempo to 200 and reveal_period to 10 after initial reveals."); @@ -6288,6 +6288,7 @@ fn 
test_get_first_block_of_epoch_large_epoch() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::weights::test_get_first_block_of_epoch_step_blocks_and_assert_with_until_next --exact --show-output --nocapture #[test] fn test_get_first_block_of_epoch_step_blocks_and_assert_with_until_next() { new_test_ext(1).execute_with(|| { @@ -6312,10 +6313,17 @@ fn test_get_first_block_of_epoch_step_blocks_and_assert_with_until_next() { expected_epoch ); - // From here, blocks_until_next_epoch should point to the start of next epoch - let until_next = SubtensorModule::blocks_until_next_epoch(netuid, tempo, current_block); let next_first = SubtensorModule::get_first_block_of_epoch(netuid, expected_epoch + 1); - assert_eq!(current_block + until_next + 1, next_first); // +1 since until is blocks to end, +1 to start next + + // From here, blocks_until_next_auto_epoch should point to the next firing under the + // state-based scheduler: `LastEpochBlock + tempo + 1`. + let last_epoch_block = LastEpochBlock::::get(netuid); + let expected_next_firing = last_epoch_block + .saturating_add(tempo as u64) + .saturating_add(1); + let until_next = + SubtensorModule::blocks_until_next_auto_epoch(netuid, tempo, current_block); + assert_eq!(current_block + until_next, expected_next_firing); // Advance to near end of this epoch let last_block = next_first.saturating_sub(1); @@ -6326,10 +6334,14 @@ fn test_get_first_block_of_epoch_step_blocks_and_assert_with_until_next() { expected_epoch ); - // Until next from near end + // Until next from near end — same invariant against the post-step state. 
+ let last_epoch_block = LastEpochBlock::::get(netuid); + let expected_next_firing = last_epoch_block + .saturating_add(tempo as u64) + .saturating_add(1); let until_next_end = - SubtensorModule::blocks_until_next_epoch(netuid, tempo, current_block); - assert_eq!(current_block + until_next_end + 1, next_first); + SubtensorModule::blocks_until_next_auto_epoch(netuid, tempo, current_block); + assert_eq!(current_block + until_next_end, expected_next_firing); } }); } diff --git a/precompiles/src/neuron.rs b/precompiles/src/neuron.rs index 1397baf272..f94940b3d6 100644 --- a/precompiles/src/neuron.rs +++ b/precompiles/src/neuron.rs @@ -303,7 +303,7 @@ mod tests { pallet_subtensor::Pallet::::set_burn(netuid, REGISTRATION_BURN.into()); pallet_subtensor::Pallet::::set_max_allowed_uids(netuid, 4096); pallet_subtensor::Pallet::::set_weights_set_rate_limit(netuid, 0); - pallet_subtensor::Pallet::::set_tempo(netuid, TEMPO); + pallet_subtensor::Pallet::::set_tempo_unchecked(netuid, TEMPO); pallet_subtensor::Pallet::::set_commit_reveal_weights_enabled(netuid, true); pallet_subtensor::Pallet::::set_reveal_period(netuid, REVEAL_PERIOD) .expect("reveal period setup should succeed"); From 02f43ee5a85d49f5944e68ddd07c16f71da74e85 Mon Sep 17 00:00:00 2001 From: Evgeny Svirsky Date: Thu, 7 May 2026 17:01:24 +0200 Subject: [PATCH 07/11] clippy --- eco-tests/src/helpers.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/eco-tests/src/helpers.rs b/eco-tests/src/helpers.rs index c306ffc96f..146c3c17e5 100644 --- a/eco-tests/src/helpers.rs +++ b/eco-tests/src/helpers.rs @@ -87,9 +87,9 @@ pub fn next_block_no_epoch(netuid: NetUid) -> u64 { let high_tempo: u16 = u16::MAX - 1; let old_tempo: u16 = SubtensorModule::get_tempo(netuid); - SubtensorModule::set_tempo(netuid, high_tempo); + SubtensorModule::set_tempo_unchecked(netuid, high_tempo); let new_block = next_block(); - SubtensorModule::set_tempo(netuid, old_tempo); + SubtensorModule::set_tempo_unchecked(netuid, 
old_tempo); new_block } @@ -99,9 +99,9 @@ pub fn run_to_block_no_epoch(netuid: NetUid, n: u64) { let high_tempo: u16 = u16::MAX - 1; let old_tempo: u16 = SubtensorModule::get_tempo(netuid); - SubtensorModule::set_tempo(netuid, high_tempo); + SubtensorModule::set_tempo_unchecked(netuid, high_tempo); run_to_block(n); - SubtensorModule::set_tempo(netuid, old_tempo); + SubtensorModule::set_tempo_unchecked(netuid, old_tempo); } pub fn step_epochs(count: u16, netuid: NetUid) { From c3fa4179d5f0161f2508c1a838c4c96714ac9871 Mon Sep 17 00:00:00 2001 From: Evgeny Svirsky Date: Thu, 7 May 2026 19:22:21 +0200 Subject: [PATCH 08/11] fixed test from devnet --- pallets/subtensor/src/tests/locks.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/tests/locks.rs b/pallets/subtensor/src/tests/locks.rs index 00472bebe5..7ec8040a95 100644 --- a/pallets/subtensor/src/tests/locks.rs +++ b/pallets/subtensor/src/tests/locks.rs @@ -2027,7 +2027,7 @@ fn test_epoch_distribution_auto_locks_owner_cut() { let subnet_tempo = 10; let stake = 100_000_000_000u64; - SubtensorModule::set_tempo(netuid, subnet_tempo); + SubtensorModule::set_tempo_unchecked(netuid, subnet_tempo); SubtensorModule::set_ck_burn(0); setup_reserves(netuid, (stake * 10_000).into(), (stake * 10_000).into()); @@ -2090,7 +2090,7 @@ fn test_epoch_distribution_auto_locks_owner_cut() { ); // Advance to the next epoch so owner cut is distributed and auto-locked. - step_block(subnet_tempo); + step_epochs(1, netuid); let owner_stake_after = get_alpha(&subnet_owner_hotkey, &subnet_owner_coldkey, netuid); let owner_cut_locked = owner_stake_after - owner_stake_before; From 73d1ab4ead54e4f6bec9d1f14754398df132504c Mon Sep 17 00:00:00 2001 From: Evgeny Svirsky Date: Fri, 8 May 2026 13:25:33 +0200 Subject: [PATCH 09/11] - disable dynamic tempo for subnets with CR enabled. 
--- .../subtensor/src/coinbase/tempo_control.rs | 12 ++ pallets/subtensor/src/macros/errors.rs | 3 + pallets/subtensor/src/tests/mod.rs | 1 + pallets/subtensor/src/tests/tempo_control.rs | 104 ++++++++++++++++++ 4 files changed, 120 insertions(+) create mode 100644 pallets/subtensor/src/tests/tempo_control.rs diff --git a/pallets/subtensor/src/coinbase/tempo_control.rs b/pallets/subtensor/src/coinbase/tempo_control.rs index 6e3f325d41..c526754648 100644 --- a/pallets/subtensor/src/coinbase/tempo_control.rs +++ b/pallets/subtensor/src/coinbase/tempo_control.rs @@ -12,6 +12,12 @@ impl Pallet { pub fn do_set_tempo(origin: OriginFor, netuid: NetUid, tempo: u16) -> DispatchResult { let who = Self::ensure_subnet_owner(origin, netuid)?; + // Block dynamic tempo for any CR-enabled subnet + ensure!( + !Self::get_commit_reveal_weights_enabled(netuid), + Error::::DynamicTempoBlockedByCommitReveal + ); + ensure!( (MIN_TEMPO..=MAX_TEMPO).contains(&tempo), Error::::TempoOutOfBounds @@ -69,6 +75,12 @@ impl Pallet { pub fn do_trigger_epoch(origin: OriginFor, netuid: NetUid) -> Result<(), DispatchError> { let who = Self::ensure_subnet_owner(origin, netuid)?; + // Block for any CR-enabled subnet + ensure!( + !Self::get_commit_reveal_weights_enabled(netuid), + Error::::DynamicTempoBlockedByCommitReveal + ); + // No `ensure_admin_window_open` here: trigger *defines* the next epoch. ensure!( PendingEpochAt::::get(netuid) == 0, diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index e5537816cb..16e3420c10 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -311,5 +311,8 @@ mod errors { ActivityCutoffFactorMilliOutOfBounds, /// `trigger_epoch` called while a previously triggered epoch is still pending. 
EpochTriggerAlreadyPending, + /// Owner-side `set_tempo`/`trigger_epoch` blocked because commit-reveal is enabled + /// for this subnet + DynamicTempoBlockedByCommitReveal, } } diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs index f3d363ec29..f4d3e007be 100644 --- a/pallets/subtensor/src/tests/mod.rs +++ b/pallets/subtensor/src/tests/mod.rs @@ -31,6 +31,7 @@ mod swap_coldkey; mod swap_hotkey; mod swap_hotkey_with_subnet; mod tao; +mod tempo_control; mod uids; mod voting_power; mod weights; diff --git a/pallets/subtensor/src/tests/tempo_control.rs b/pallets/subtensor/src/tests/tempo_control.rs new file mode 100644 index 0000000000..b06abf51c3 --- /dev/null +++ b/pallets/subtensor/src/tests/tempo_control.rs @@ -0,0 +1,104 @@ +#![allow(clippy::expect_used)] +use frame_support::{assert_noop, assert_ok}; +use frame_system::Config; +use sp_core::U256; +use subtensor_runtime_common::NetUid; + +use super::mock::*; +use crate::{ + AdminFreezeWindow, CommitRevealWeightsEnabled, Error, PendingEpochAt, SubnetOwner, + SubtokenEnabled, Tempo, +}; + +const DEFAULT_TEMPO: u16 = 360; +const NEW_TEMPO: u16 = 720; + +fn setup_subnet(owner: U256) -> NetUid { + let netuid = NetUid::from(1); + add_network(netuid, DEFAULT_TEMPO, 0); + SubnetOwner::::insert(netuid, owner); + SubtokenEnabled::::insert(netuid, true); + crate::Pallet::::set_admin_freeze_window(0); + netuid +} + +#[test] +fn do_set_tempo_blocked_when_commit_reveal_enabled() { + new_test_ext(1).execute_with(|| { + let owner = U256::from(1); + let netuid = setup_subnet(owner); + + // Default for `CommitRevealWeightsEnabled` is `true` (DefaultCommitRevealWeightsEnabled). + assert!(CommitRevealWeightsEnabled::::get(netuid)); + + assert_noop!( + crate::Pallet::::do_set_tempo( + <::RuntimeOrigin>::signed(owner), + netuid, + NEW_TEMPO, + ), + Error::::DynamicTempoBlockedByCommitReveal + ); + + // Tempo unchanged. 
+ assert_eq!(Tempo::::get(netuid), DEFAULT_TEMPO); + }); +} + +#[test] +fn do_set_tempo_passes_when_commit_reveal_disabled() { + new_test_ext(1).execute_with(|| { + let owner = U256::from(1); + let netuid = setup_subnet(owner); + + CommitRevealWeightsEnabled::::insert(netuid, false); + + assert_ok!(crate::Pallet::::do_set_tempo( + <::RuntimeOrigin>::signed(owner), + netuid, + NEW_TEMPO, + )); + + assert_eq!(Tempo::::get(netuid), NEW_TEMPO); + }); +} + +#[test] +fn do_trigger_epoch_blocked_when_commit_reveal_enabled() { + new_test_ext(1).execute_with(|| { + let owner = U256::from(1); + let netuid = setup_subnet(owner); + + assert!(CommitRevealWeightsEnabled::::get(netuid)); + + assert_noop!( + crate::Pallet::::do_trigger_epoch( + <::RuntimeOrigin>::signed(owner), + netuid, + ), + Error::::DynamicTempoBlockedByCommitReveal + ); + + // No pending trigger recorded. + assert_eq!(PendingEpochAt::::get(netuid), 0); + }); +} + +#[test] +fn do_trigger_epoch_passes_when_commit_reveal_disabled() { + new_test_ext(1).execute_with(|| { + let owner = U256::from(1); + let netuid = setup_subnet(owner); + + CommitRevealWeightsEnabled::::insert(netuid, false); + AdminFreezeWindow::::set(5); + + assert_ok!(crate::Pallet::::do_trigger_epoch( + <::RuntimeOrigin>::signed(owner), + netuid, + )); + + let now = crate::Pallet::::get_current_block_as_u64(); + assert_eq!(PendingEpochAt::::get(netuid), now + 5); + }); +} From 1ba4a3d3980b179202ccec02f92d0b4a639e1711 Mon Sep 17 00:00:00 2001 From: Evgeny Svirsky Date: Fri, 8 May 2026 14:07:50 +0200 Subject: [PATCH 10/11] - update migration - do not clamp tempos --- .../src/migrations/migrate_dynamic_tempo.rs | 28 ++--- pallets/subtensor/src/tests/migration.rs | 112 ++++++++++++++++++ 2 files changed, 122 insertions(+), 18 deletions(-) diff --git a/pallets/subtensor/src/migrations/migrate_dynamic_tempo.rs b/pallets/subtensor/src/migrations/migrate_dynamic_tempo.rs index 0eba21cb24..7bc38275a6 100644 --- 
a/pallets/subtensor/src/migrations/migrate_dynamic_tempo.rs +++ b/pallets/subtensor/src/migrations/migrate_dynamic_tempo.rs @@ -9,14 +9,16 @@ use scale_info::prelude::string::String; /// post-upgrade epoch lands on the same block as the legacy modulo formula /// `(block + netuid + 1) % (tempo + 1) == 0`. The new scheduler period is /// `tempo + 1` (next firing at `LastEpochBlock + tempo + 1`). -/// 2. Defensively clamps `Tempo` values in `(0, MIN_TEMPO) ∪ (MAX_TEMPO, u16::MAX]` -/// into `[MIN_TEMPO, MAX_TEMPO]`. Subnets with `Tempo == 0` are left as-is — the -/// legacy short-circuit keeps them dormant and matches their pre-upgrade behaviour. -/// 3. Converts each subnet's existing `ActivityCutoff[netuid]` (absolute block count) +/// Existing `Tempo[netuid]` values are preserved as-is regardless of whether +/// they fall inside `[MIN_TEMPO, MAX_TEMPO]`. Owner-side `set_tempo` enforces +/// the bounds for new updates; root-side `sudo_set_tempo` can still write any +/// `u16`. Subnets with `Tempo == 0` are left as-is — the legacy short-circuit +/// keeps them dormant and matches their pre-upgrade behaviour. +/// 2. Converts each subnet's existing `ActivityCutoff[netuid]` (absolute block count) /// into `ActivityCutoffFactorMilli[netuid]` (per-mille of `tempo`) so that /// `factor * tempo / 1000 ≈ old_cutoff` post-upgrade. Production defaults -/// (`tempo=360`, `cutoff=5000`) round-trip to 4999 blocks (1-block delta from -/// integer division, ≈0.02%). Out-of-range factors are clamped to +/// (`tempo=360`, `cutoff=5000`) round-trip to 5000 blocks exactly via ceiling +/// division. Out-of-range factors are clamped to /// `[MIN_ACTIVITY_CUTOFF_FACTOR_MILLI, MAX_ACTIVITY_CUTOFF_FACTOR_MILLI]` — /// extreme historical cutoffs may shift to the nearest representable factor. 
pub fn migrate_dynamic_tempo() -> Weight { @@ -34,7 +36,6 @@ pub fn migrate_dynamic_tempo() -> Weight { let current_block = Pallet::::get_current_block_as_u64(); let mut visited: u64 = 0; - let mut tempo_clamped: u64 = 0; let mut last_epoch_seeded: u64 = 0; let mut activity_factor_seeded: u64 = 0; let mut activity_factor_clamped: u64 = 0; @@ -46,7 +47,7 @@ pub fn migrate_dynamic_tempo() -> Weight { for netuid in netuids.into_iter() { visited = visited.saturating_add(1); - let mut tempo = Tempo::::get(netuid); + let tempo = Tempo::::get(netuid); reads = reads.saturating_add(1); if tempo == 0 { @@ -54,15 +55,6 @@ pub fn migrate_dynamic_tempo() -> Weight { continue; } - // Defensive bounds clamp. - let clamped = tempo.clamp(MIN_TEMPO, MAX_TEMPO); - if clamped != tempo { - tempo = clamped; - Tempo::::insert(netuid, tempo); - tempo_clamped = tempo_clamped.saturating_add(1); - writes = writes.saturating_add(1); - } - // Compute next-epoch block under the *legacy* modulo formula and back-fill // `LastEpochBlock` so the *new* formula yields the same next-epoch block. 
// Legacy `blocks_until_next_epoch`: @@ -107,7 +99,7 @@ pub fn migrate_dynamic_tempo() -> Weight { total_weight = total_weight.saturating_add(T::DbWeight::get().reads_writes(reads, writes)); log::info!( - "Dynamic tempo migration: visited={visited}, tempo_clamped={tempo_clamped}, last_epoch_seeded={last_epoch_seeded}, activity_factor_seeded={activity_factor_seeded}, activity_factor_clamped={activity_factor_clamped}" + "Dynamic tempo migration: visited={visited}, last_epoch_seeded={last_epoch_seeded}, activity_factor_seeded={activity_factor_seeded}, activity_factor_clamped={activity_factor_clamped}" ); HasMigrationRun::::insert(&mig_name, true); diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index bf280556e0..18874788dc 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -4356,3 +4356,115 @@ fn test_migrate_subnet_balances() { assert!(HasMigrationRun::::get(MIGRATION_NAME.to_vec())); }); } + +#[test] +fn test_migrate_dynamic_tempo_aligns_first_post_upgrade_fire() { + new_test_ext(1).execute_with(|| { + const MIGRATION_NAME: &str = "dynamic_tempo_v1"; + let netuid = NetUid::from(7u16); + let tempo: u16 = 360; + + add_network(netuid, tempo, 0); + run_to_block(1234); + + // Snapshot legacy formula's next-fire block at the migration moment. + let legacy_blocks_until_next = + crate::Pallet::::blocks_until_next_auto_epoch(netuid, tempo, 1234); + let expected_next_fire = 1234u64 + legacy_blocks_until_next; + + crate::migrations::migrate_dynamic_tempo::migrate_dynamic_tempo::(); + + // New formula: next fire = LastEpochBlock + tempo + 1. 
+ let last_epoch = LastEpochBlock::::get(netuid); + assert_eq!( + last_epoch + tempo as u64 + 1, + expected_next_fire, + "back-fill should make new scheduler fire at the same block as legacy modulo" + ); + assert!(HasMigrationRun::::get( + MIGRATION_NAME.as_bytes().to_vec() + )); + }); +} + +#[test] +fn test_migrate_dynamic_tempo_preserves_non_standard_tempo() { + new_test_ext(1).execute_with(|| { + // Three subnets — one standard, two with non-standard tempo + // (simulates the 2 mainnet subnets root configured outside MIN/MAX bounds). + let standard = NetUid::from(1u16); + let small = NetUid::from(2u16); + let large = NetUid::from(3u16); + + add_network(standard, 360, 0); + add_network(small, 10, 0); // < MIN_TEMPO (360) + add_network(large, 60_000, 0); // > MAX_TEMPO (50_400) + + crate::migrations::migrate_dynamic_tempo::migrate_dynamic_tempo::(); + + // Tempo values preserved as-is — no clamp. + assert_eq!(Tempo::::get(standard), 360); + assert_eq!(Tempo::::get(small), 10); + assert_eq!(Tempo::::get(large), 60_000); + + // All non-zero tempos got LastEpochBlock seeded. + assert!(LastEpochBlock::::contains_key(standard)); + assert!(LastEpochBlock::::contains_key(small)); + assert!(LastEpochBlock::::contains_key(large)); + }); +} + +#[test] +fn test_migrate_dynamic_tempo_activity_cutoff_round_trips_production_values() { + new_test_ext(1).execute_with(|| { + // (cutoff_blocks, tempo) combinations from production data. 
+ let cases: [(u16, u16); 6] = [ + (5000, 360), + (6000, 360), + (7200, 360), + (12000, 360), + (1000, 360), + (360, 360), + ]; + + for (i, &(cutoff, tempo)) in cases.iter().enumerate() { + let netuid = NetUid::from((i + 1) as u16); + add_network(netuid, tempo, 0); + ActivityCutoff::::insert(netuid, cutoff); + } + + crate::migrations::migrate_dynamic_tempo::migrate_dynamic_tempo::(); + + for (i, &(cutoff, _)) in cases.iter().enumerate() { + let netuid = NetUid::from((i + 1) as u16); + // get_activity_cutoff_blocks = factor * tempo / 1000 must equal original cutoff exactly. + assert_eq!( + crate::Pallet::::get_activity_cutoff_blocks(netuid), + cutoff as u64, + "ceiling division must round-trip cutoff exactly for netuid {}", + u16::from(netuid) + ); + } + }); +} + +#[test] +fn test_migrate_dynamic_tempo_idempotent() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + add_network(netuid, 360, 0); + + crate::migrations::migrate_dynamic_tempo::migrate_dynamic_tempo::(); + let last_epoch_first = LastEpochBlock::::get(netuid); + + // Mutate state to verify second run is a no-op. 
+ run_to_block(crate::Pallet::::get_current_block_as_u64() + 100); + crate::migrations::migrate_dynamic_tempo::migrate_dynamic_tempo::(); + + assert_eq!( + LastEpochBlock::::get(netuid), + last_epoch_first, + "second migration call must be a no-op" + ); + }); +} From 215f13b9d89405ac74723cb549d88fbc4b1b53a4 Mon Sep 17 00:00:00 2001 From: Evgeny Svirsky Date: Fri, 8 May 2026 14:27:18 +0200 Subject: [PATCH 11/11] fix migration test --- pallets/subtensor/src/tests/migration.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index 18874788dc..d2fa0d3574 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -4365,12 +4365,16 @@ fn test_migrate_dynamic_tempo_aligns_first_post_upgrade_fire() { let tempo: u16 = 360; add_network(netuid, tempo, 0); - run_to_block(1234); - - // Snapshot legacy formula's next-fire block at the migration moment. - let legacy_blocks_until_next = - crate::Pallet::::blocks_until_next_auto_epoch(netuid, tempo, 1234); - let expected_next_fire = 1234u64 + legacy_blocks_until_next; + let current_block = 1234u64; + run_to_block(current_block); + + // Compute the next-fire block under the legacy modulo formula: tempo - ((block + netuid + 1) % (tempo + 1)). + let netuid_plus_one = (u16::from(netuid) as u64) + 1; + let tempo_plus_one = (tempo as u64) + 1; + let adjusted = current_block + netuid_plus_one; + let remainder = adjusted % tempo_plus_one; + let legacy_blocks_until_next = (tempo as u64) - remainder; + let expected_next_fire = current_block + legacy_blocks_until_next; crate::migrations::migrate_dynamic_tempo::migrate_dynamic_tempo::();