Skip to content
10 changes: 5 additions & 5 deletions eco-tests/src/helpers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -87,9 +87,9 @@ pub fn next_block_no_epoch(netuid: NetUid) -> u64 {
let high_tempo: u16 = u16::MAX - 1;
let old_tempo: u16 = SubtensorModule::get_tempo(netuid);

SubtensorModule::set_tempo(netuid, high_tempo);
SubtensorModule::set_tempo_unchecked(netuid, high_tempo);
let new_block = next_block();
SubtensorModule::set_tempo(netuid, old_tempo);
SubtensorModule::set_tempo_unchecked(netuid, old_tempo);

new_block
}
Expand All @@ -99,14 +99,14 @@ pub fn run_to_block_no_epoch(netuid: NetUid, n: u64) {
let high_tempo: u16 = u16::MAX - 1;
let old_tempo: u16 = SubtensorModule::get_tempo(netuid);

SubtensorModule::set_tempo(netuid, high_tempo);
SubtensorModule::set_tempo_unchecked(netuid, high_tempo);
run_to_block(n);
SubtensorModule::set_tempo(netuid, old_tempo);
SubtensorModule::set_tempo_unchecked(netuid, old_tempo);
}

pub fn step_epochs(count: u16, netuid: NetUid) {
for _ in 0..count {
let blocks_to_next_epoch = SubtensorModule::blocks_until_next_epoch(
let blocks_to_next_epoch = SubtensorModule::blocks_until_next_auto_epoch(
netuid,
SubtensorModule::get_tempo(netuid),
SubtensorModule::get_current_block_as_u64(),
Expand Down
2 changes: 1 addition & 1 deletion pallets/admin-utils/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -975,7 +975,7 @@ pub mod pallet {
pallet_subtensor::Pallet::<T>::if_subnet_exist(netuid),
Error::<T>::SubnetDoesNotExist
);
pallet_subtensor::Pallet::<T>::set_tempo(netuid, tempo);
pallet_subtensor::Pallet::<T>::apply_tempo_with_cycle_reset(netuid, tempo);
log::debug!("TempoSet( netuid: {netuid:?} tempo: {tempo:?} ) ");
Ok(())
}
Expand Down
16 changes: 10 additions & 6 deletions pallets/admin-utils/src/tests/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1981,16 +1981,20 @@ fn test_sudo_set_admin_freeze_window_and_rate() {
fn test_freeze_window_blocks_root_and_owner() {
new_test_ext().execute_with(|| {
let netuid = NetUid::from(1);
let tempo = 10;
let tempo: u16 = 10;
// Create subnet with tempo 10
add_network(netuid, tempo);
// Set freeze window to 3 blocks
assert_ok!(AdminUtils::sudo_set_admin_freeze_window(
<<Test as Config>::RuntimeOrigin>::root(),
3
));
// Advance to a block where remaining < 3
run_to_block((tempo - 2).into());
// Pin the state-based scheduler so the next auto-epoch lands at
// `tempo + 1`. Freeze window covers blocks (next_auto - 3, next_auto].
pallet_subtensor::LastEpochBlock::<Test>::insert(netuid, 0);
let next_auto = (tempo as u64).saturating_add(1);
// Advance to a block inside the freeze window (remaining < 3).
run_to_block(next_auto - 2);

// Root should be blocked during freeze window
assert_noop!(
Expand Down Expand Up @@ -2086,7 +2090,7 @@ fn test_owner_hyperparam_update_rate_limit_enforced() {
SubnetOwner::<Test>::insert(netuid, owner);

// Set tempo to 1 so owner hyperparam RL = 2 tempos = 2 blocks
SubtensorModule::set_tempo(netuid, 1);
SubtensorModule::set_tempo_unchecked(netuid, 1);
// Disable admin freeze window to avoid blocking on small tempo
assert_ok!(AdminUtils::sudo_set_admin_freeze_window(
<<Test as Config>::RuntimeOrigin>::root(),
Expand Down Expand Up @@ -2141,7 +2145,7 @@ fn test_hyperparam_rate_limit_enforced_by_tempo() {
SubnetOwner::<Test>::insert(netuid, owner);

// Set tempo to 1 so RL = 2 blocks
SubtensorModule::set_tempo(netuid, 1);
SubtensorModule::set_tempo_unchecked(netuid, 1);
// Disable admin freeze window to avoid blocking on small tempo
assert_ok!(AdminUtils::sudo_set_admin_freeze_window(
<<Test as Config>::RuntimeOrigin>::root(),
Expand Down Expand Up @@ -2189,7 +2193,7 @@ fn test_owner_hyperparam_rate_limit_independent_per_param() {
SubnetOwner::<Test>::insert(netuid, owner);

// Use small tempo to make RL short and deterministic (2 blocks when tempo=1)
SubtensorModule::set_tempo(netuid, 1);
SubtensorModule::set_tempo_unchecked(netuid, 1);
// Disable admin freeze window so it doesn't interfere with small tempo
assert_ok!(AdminUtils::sudo_set_admin_freeze_window(
<<Test as Config>::RuntimeOrigin>::root(),
Expand Down
6 changes: 4 additions & 2 deletions pallets/subtensor/src/coinbase/block_step.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,11 @@ impl<T: Config + pallet_drand::Config> Pallet<T> {
}

fn try_set_pending_children(block_number: u64) {
// Called *after* `run_coinbase` has advanced `LastEpochBlock` for any
// subnet whose epoch slot fired this block — `should_run_epoch` is no
// longer true. Detect "epoch just fired" by `LastEpochBlock == block`.
for netuid in Self::get_all_subnet_netuids() {
if Self::should_run_epoch(netuid, block_number) {
// Set pending children on the epoch.
if LastEpochBlock::<T>::get(netuid) == block_number {
Self::do_set_pending_children(netuid);
}
}
Expand Down
1 change: 1 addition & 0 deletions pallets/subtensor/src/coinbase/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,3 +7,4 @@ pub mod root;
pub mod run_coinbase;
pub mod subnet_emissions;
pub mod tao;
pub mod tempo_control;
3 changes: 3 additions & 0 deletions pallets/subtensor/src/coinbase/root.rs
Original file line number Diff line number Diff line change
Expand Up @@ -284,6 +284,9 @@ impl<T: Config> Pallet<T> {
MaxAllowedUids::<T>::remove(netuid);
ImmunityPeriod::<T>::remove(netuid);
ActivityCutoff::<T>::remove(netuid);
ActivityCutoffFactorMilli::<T>::remove(netuid);
LastEpochBlock::<T>::remove(netuid);
PendingEpochAt::<T>::remove(netuid);
MinAllowedWeights::<T>::remove(netuid);
RegistrationsThisInterval::<T>::remove(netuid);
POWRegistrationsThisInterval::<T>::remove(netuid);
Expand Down
97 changes: 70 additions & 27 deletions pallets/subtensor/src/coinbase/run_coinbase.rs
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,14 @@ impl<T: Config> Pallet<T> {
let emissions_to_distribute = Self::drain_pending(&subnets, current_block);

// --- 6. Distribute the emissions to the subnets.
// Bonds masking inside `distribute_emission` reads `LastMechansimStepBlock` and
// must see the previous successful run, so we delay the write until after.
Self::distribute_emissions_to_subnets(&emissions_to_distribute);

// --- 7. Mark each successful epoch run as the last mechanism step.
for netuid in emissions_to_distribute.keys() {
LastMechansimStepBlock::<T>::insert(*netuid, current_block);
}
}

pub fn inject_and_maybe_swap(
Expand Down Expand Up @@ -318,19 +325,35 @@ impl<T: Config> Pallet<T> {
NetUid,
(AlphaBalance, AlphaBalance, AlphaBalance, AlphaBalance),
> = BTreeMap::new();
// --- Drain pending emissions for all subnets that are at their tempo.
// Run the epoch for *all* subnets, even if we don't emit anything.
// Per-block cap on number of epochs that may run; the rest are deferred 1 block forward
// by setting `PendingEpochAt`.
let mut epochs_run_this_block: u32 = 0;

for &netuid in subnets.iter() {
// Increment blocks since last step.
// Increment blocks since last *successful* step (existing semantics).
BlocksSinceLastStep::<T>::mutate(netuid, |total| *total = total.saturating_add(1));

// Run the epoch if applicable.
if Self::should_run_epoch(netuid, current_block)
&& Self::is_epoch_input_state_consistent(netuid)
{
// Restart counters.
if !Self::should_run_epoch(netuid, current_block) {
continue;
}

// Per-block cap — defer if already at limit.
if epochs_run_this_block >= MAX_EPOCHS_PER_BLOCK {
let next_block = current_block.saturating_add(1);
PendingEpochAt::<T>::insert(netuid, next_block);
Self::deposit_event(Event::EpochDeferred {
netuid,
from_block: current_block,
to_block: next_block,
});
continue;
}

if Self::is_epoch_input_state_consistent(netuid) {
// Reset blocks-since counter; LastMechansimStepBlock is written
// post-distribute (see the caller), so bonds masking can read the
// previous successful run.
BlocksSinceLastStep::<T>::insert(netuid, 0);
LastMechansimStepBlock::<T>::insert(netuid, current_block);

// Get and drain the subnet pending emission.
let pending_server_alpha = PendingServerEmission::<T>::get(netuid);
Expand All @@ -357,7 +380,19 @@ impl<T: Config> Pallet<T> {
owner_cut,
),
);
epochs_run_this_block = epochs_run_this_block.saturating_add(1);
} else {
// Schedule advances below; execution skipped. Pending emissions accumulate
// and will be drained by the next successful epoch.
Self::deposit_event(Event::EpochSkippedDueToInconsistentState {
netuid,
block: current_block,
});
}

// Advance the schedule unconditionally — the slot is consumed.
LastEpochBlock::<T>::insert(netuid, current_block);
PendingEpochAt::<T>::insert(netuid, 0);
}
emissions_to_distribute
}
Expand Down Expand Up @@ -996,28 +1031,36 @@ impl<T: Config> Pallet<T> {
/// # Returns
/// * `bool` - True if the epoch should run, false otherwise.
pub fn should_run_epoch(netuid: NetUid, current_block: u64) -> bool {
Self::blocks_until_next_epoch(netuid, Self::get_tempo(netuid), current_block) == 0
let tempo = Self::get_tempo(netuid);
if tempo == 0 {
return false;
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This means that if tempo is set to 0, manual trigger or max cap of MAX_TEMPO will not work. Is this by design?

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, the previous implementation meant that if tempo is 0, we don't run tempo on this subnet.
We don't have this case in Mainnet, but we support it. There is still a possibility to set the tempo to 0 by the root.
So we support the same semantics.

}
let pending = PendingEpochAt::<T>::get(netuid);
if pending > 0 && current_block >= pending {
return true;
}
if BlocksSinceLastStep::<T>::get(netuid) > MAX_TEMPO as u64 {
return true;
}
let last = LastEpochBlock::<T>::get(netuid);
let blocks_since = current_block.saturating_sub(last);
blocks_since > tempo as u64
}

/// Helper function which returns the number of blocks remaining before we will run the epoch on this
/// network. Networks run their epoch when (block_number + netuid + 1 ) % (tempo + 1) = 0
/// tempo | netuid | # first epoch block
/// 1 0 0
/// 1 1 1
/// 2 0 1
/// 2 1 0
/// 100 0 99
/// 100 1 98
/// Special case: tempo = 0, the network never runs.
///
pub fn blocks_until_next_epoch(netuid: NetUid, tempo: u16, block_number: u64) -> u64 {
/// Returns the number of blocks remaining before the next automatic epoch under the
/// stateful scheduler (period `tempo + 1`, anchored on `LastEpochBlock`). Does NOT account for:
/// - `PendingEpochAt` (owner-triggered manual fire — could happen sooner),
/// - `BlocksSinceLastStep > MAX_TEMPO` safety-net,
/// - per-block-cap defer (could push the actual fire one or more blocks later).
/// Used by the admin-freeze-window predicate and external tooling. Returns `u64::MAX` when
/// `tempo == 0` (legacy defensive short-circuit).
pub fn blocks_until_next_auto_epoch(netuid: NetUid, tempo: u16, block_number: u64) -> u64 {
if tempo == 0 {
return u64::MAX;
}
let netuid_plus_one = (u16::from(netuid) as u64).saturating_add(1);
let tempo_plus_one = (tempo as u64).saturating_add(1);
let adjusted_block = block_number.wrapping_add(netuid_plus_one);
let remainder = adjusted_block.checked_rem(tempo_plus_one).unwrap_or(0);
(tempo as u64).saturating_sub(remainder)
let last = LastEpochBlock::<T>::get(netuid);
// Period is `tempo + 1`: next firing at `last + tempo + 1`.
let next_auto = last.saturating_add(tempo as u64).saturating_add(1);
next_auto.saturating_sub(block_number)
}
}
110 changes: 110 additions & 0 deletions pallets/subtensor/src/coinbase/tempo_control.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
use super::*;
use crate::Error;
use frame_support::pallet_prelude::DispatchResult;
use sp_runtime::DispatchError;
use subtensor_runtime_common::NetUid;

use crate::system::pallet_prelude::OriginFor;
use crate::utils::rate_limiting::{Hyperparameter, TransactionType};

impl<T: Config> Pallet<T> {
    /// Applies an owner-requested tempo change for `netuid`.
    ///
    /// Validation order (first failure wins):
    /// 1. caller must be the subnet owner;
    /// 2. commit-reveal weights must be disabled on the subnet;
    /// 3. `tempo` must lie in `[MIN_TEMPO, MAX_TEMPO]`;
    /// 4. the admin freeze window must be open;
    /// 5. the `TempoUpdate` rate limit must pass.
    ///
    /// On success the tempo is applied together with a cycle reset, and the
    /// rate-limit bookkeeping is stamped with the current block.
    pub fn do_set_tempo(origin: OriginFor<T>, netuid: NetUid, tempo: u16) -> DispatchResult {
        let owner = Self::ensure_subnet_owner(origin, netuid)?;

        // Dynamic tempo is not allowed while commit-reveal is enabled.
        ensure!(
            !Self::get_commit_reveal_weights_enabled(netuid),
            Error::<T>::DynamicTempoBlockedByCommitReveal
        );

        // Reject out-of-range tempos before touching any state.
        ensure!(
            (MIN_TEMPO..=MAX_TEMPO).contains(&tempo),
            Error::<T>::TempoOutOfBounds
        );

        Self::ensure_admin_window_open(netuid)?;

        // Per-subnet owner rate limiting for tempo updates.
        let limiter = TransactionType::TempoUpdate;
        ensure!(
            limiter.passes_rate_limit_on_subnet::<T>(&owner, netuid),
            Error::<T>::TxRateLimitExceeded
        );

        let block_now = Self::get_current_block_as_u64();

        Self::apply_tempo_with_cycle_reset(netuid, tempo);

        // Record this update for future rate-limit checks.
        limiter.set_last_block_on_subnet::<T>(&owner, netuid, block_now);
        Ok(())
    }

    /// Applies an owner-requested activity-cutoff-factor change for `netuid`.
    ///
    /// Checks, in order: subnet ownership, `factor_milli` within
    /// `[MIN_ACTIVITY_CUTOFF_FACTOR_MILLI, MAX_ACTIVITY_CUTOFF_FACTOR_MILLI]`,
    /// admin window open, and the per-parameter owner rate limit. On success
    /// the new factor is stored and the rate limit is stamped.
    pub fn do_set_activity_cutoff_factor(
        origin: OriginFor<T>,
        netuid: NetUid,
        factor_milli: u32,
    ) -> DispatchResult {
        let owner = Self::ensure_subnet_owner(origin, netuid)?;

        // Bounds check on the requested factor (milli-units).
        ensure!(
            (MIN_ACTIVITY_CUTOFF_FACTOR_MILLI..=MAX_ACTIVITY_CUTOFF_FACTOR_MILLI)
                .contains(&factor_milli),
            Error::<T>::ActivityCutoffFactorMilliOutOfBounds
        );

        Self::ensure_admin_window_open(netuid)?;

        // Rate limit is tracked per hyperparameter.
        let limiter =
            TransactionType::OwnerHyperparamUpdate(Hyperparameter::ActivityCutoffFactorMilli);
        ensure!(
            limiter.passes_rate_limit_on_subnet::<T>(&owner, netuid),
            Error::<T>::TxRateLimitExceeded
        );

        let block_now = Self::get_current_block_as_u64();

        Self::set_activity_cutoff_factor_milli(netuid, factor_milli);
        limiter.set_last_block_on_subnet::<T>(&owner, netuid, block_now);

        Ok(())
    }

    /// Owner-triggered manual epoch for `netuid`.
    ///
    /// The epoch does not fire immediately: it is scheduled to run
    /// `AdminFreezeWindow` blocks from now via `PendingEpochAt`. That pending
    /// countdown is what engages the freeze window for the subnet (see
    /// `is_in_admin_freeze_window`).
    pub fn do_trigger_epoch(origin: OriginFor<T>, netuid: NetUid) -> Result<(), DispatchError> {
        let owner = Self::ensure_subnet_owner(origin, netuid)?;

        // Manual triggering is blocked on commit-reveal-enabled subnets.
        ensure!(
            !Self::get_commit_reveal_weights_enabled(netuid),
            Error::<T>::DynamicTempoBlockedByCommitReveal
        );

        // Deliberately no `ensure_admin_window_open` here: the trigger itself
        // defines the next epoch. Only one trigger may be outstanding at a time.
        ensure!(
            PendingEpochAt::<T>::get(netuid) == 0,
            Error::<T>::EpochTriggerAlreadyPending
        );

        let limiter = TransactionType::OwnerHyperparamUpdate(Hyperparameter::TriggerEpoch);
        ensure!(
            limiter.passes_rate_limit_on_subnet::<T>(&owner, netuid),
            Error::<T>::TxRateLimitExceeded
        );

        let block_now = Self::get_current_block_as_u64();
        // Schedule the manual epoch `AdminFreezeWindow` blocks out.
        let scheduled_at = block_now.saturating_add(AdminFreezeWindow::<T>::get() as u64);

        PendingEpochAt::<T>::insert(netuid, scheduled_at);
        limiter.set_last_block_on_subnet::<T>(&owner, netuid, block_now);

        Self::deposit_event(Event::EpochTriggered {
            netuid,
            by: owner,
            fires_at: scheduled_at,
        });
        Ok(())
    }
}
Loading
Loading