5 changes: 4 additions & 1 deletion pallets/admin-utils/src/lib.rs
@@ -975,7 +975,10 @@ pub mod pallet {
pallet_subtensor::Pallet::<T>::if_subnet_exist(netuid),
Error::<T>::SubnetDoesNotExist
);
pallet_subtensor::Pallet::<T>::set_tempo(netuid, tempo);
pallet_subtensor::Pallet::<T>::set_tempo_unchecked(netuid, tempo);
// Cycle reset on every successful set_tempo
let now = pallet_subtensor::Pallet::<T>::get_current_block_as_u64();
pallet_subtensor::LastEpochBlock::<T>::insert(netuid, now);
log::debug!("TempoSet( netuid: {netuid:?} tempo: {tempo:?} ) ");
Ok(())
}
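Both this sudo path and the owner path in `tempo_control.rs` (below) pair the `Tempo` write with the `LastEpochBlock` cycle reset; the review thread on `do_set_tempo` asks for exactly this pairing to be factored out, and the author added a helper in df184e3. A minimal sketch of what such a helper could look like — the name `set_tempo_and_reset_cycle` is illustrative, not necessarily the one in that commit:

impl<T: Config> Pallet<T> {
    /// Write a new tempo and restart the epoch cycle from the current block.
    /// Illustrative sketch only; the actual helper landed in df184e3.
    pub fn set_tempo_and_reset_cycle(netuid: NetUid, tempo: u16) {
        Tempo::<T>::insert(netuid, tempo);
        // Cycle reset: the next automatic epoch fires `tempo + 1` blocks from now.
        LastEpochBlock::<T>::insert(netuid, Self::get_current_block_as_u64());
    }
}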
6 changes: 4 additions & 2 deletions pallets/subtensor/src/coinbase/block_step.rs
@@ -36,9 +36,11 @@ impl<T: Config + pallet_drand::Config> Pallet<T> {
}

fn try_set_pending_children(block_number: u64) {
// Called *after* `run_coinbase` has advanced `LastEpochBlock` for any
// subnet whose epoch slot fired this block — `should_run_epoch` is no
// longer true. Detect "epoch just fired" by `LastEpochBlock == block`.
for netuid in Self::get_all_subnet_netuids() {
if Self::should_run_epoch(netuid, block_number) {
// Set pending children on the epoch.
if LastEpochBlock::<T>::get(netuid) == block_number {
Self::do_set_pending_children(netuid);
}
}
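The check above only works because `run_coinbase` executes earlier in the same `block_step` and has already consumed the slot. A toy model (plain functions, not pallet code) of why the cadence predicate can no longer be used here while `LastEpochBlock == block_number` still detects the firing:

// Toy model: once run_coinbase consumes the slot, `should_run_epoch` goes false.
fn should_run_epoch(last_epoch_block: u64, tempo: u64, block: u64) -> bool {
    block.saturating_sub(last_epoch_block) > tempo
}

fn epoch_just_fired(last_epoch_block: u64, block: u64) -> bool {
    last_epoch_block == block
}

fn main() {
    let tempo = 360;
    // Before run_coinbase at block 1_000 (last fired at 639): the slot is due.
    assert!(should_run_epoch(639, tempo, 1_000));
    // run_coinbase consumes it: LastEpochBlock := 1_000. The cadence predicate
    // is now false, but the "epoch just fired" check still sees it.
    assert!(!should_run_epoch(1_000, tempo, 1_000));
    assert!(epoch_just_fired(1_000, 1_000));
}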
1 change: 1 addition & 0 deletions pallets/subtensor/src/coinbase/mod.rs
@@ -6,3 +6,4 @@ pub mod root;
pub mod run_coinbase;
pub mod subnet_emissions;
pub mod tao;
pub mod tempo_control;
3 changes: 3 additions & 0 deletions pallets/subtensor/src/coinbase/root.rs
@@ -284,6 +284,9 @@ impl<T: Config> Pallet<T> {
MaxAllowedUids::<T>::remove(netuid);
ImmunityPeriod::<T>::remove(netuid);
ActivityCutoff::<T>::remove(netuid);
ActivityCutoffFactorMilli::<T>::remove(netuid);
LastEpochBlock::<T>::remove(netuid);
PendingEpochAt::<T>::remove(netuid);
MinAllowedWeights::<T>::remove(netuid);
RegistrationsThisInterval::<T>::remove(netuid);
POWRegistrationsThisInterval::<T>::remove(netuid);
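All three new maps are `ValueQuery` with `DefaultZeroU64` (see lib.rs below), so a removed subnet reads back as a fresh cycle. A toy model of that semantics — model types only, not pallet storage:

use std::collections::BTreeMap;

// Toy model of ValueQuery: a removed key reads back as the default (0).
fn get_or_default(map: &BTreeMap<u16, u64>, netuid: u16) -> u64 {
    map.get(&netuid).copied().unwrap_or(0)
}

fn main() {
    let mut last_epoch_block = BTreeMap::new();
    last_epoch_block.insert(7u16, 1_000u64);
    last_epoch_block.remove(&7); // remove_network clears the entry
    // With DefaultZeroU64, a removed (or re-registered) subnet restarts at 0.
    assert_eq!(get_or_default(&last_epoch_block, 7), 0);
}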
92 changes: 66 additions & 26 deletions pallets/subtensor/src/coinbase/run_coinbase.rs
@@ -64,7 +64,14 @@ impl<T: Config> Pallet<T> {
let emissions_to_distribute = Self::drain_pending(&subnets, current_block);

// --- 6. Distribute the emissions to the subnets.
// Bonds masking inside `distribute_emission` reads `LastMechansimStepBlock` and
// must see the previous successful run, so we delay the write until after.
Self::distribute_emissions_to_subnets(&emissions_to_distribute);

// --- 7. Mark each successful epoch run as the last mechanism step.
for netuid in emissions_to_distribute.keys() {
LastMechansimStepBlock::<T>::insert(*netuid, current_block);
}
}

pub fn inject_and_maybe_swap(
@@ -318,19 +325,35 @@ impl<T: Config> Pallet<T> {
NetUid,
(AlphaBalance, AlphaBalance, AlphaBalance, AlphaBalance),
> = BTreeMap::new();
// --- Drain pending emissions for all subnets that are at their tempo.
// Run the epoch for *all* subnets, even if we don't emit anything.
// Per-block cap on number of epochs that may run; the rest are deferred 1 block forward
// by setting `PendingEpochAt`.
let mut epochs_run_this_block: u32 = 0;

for &netuid in subnets.iter() {
// Increment blocks since last step.
// Increment blocks since last *successful* step (existing semantics).
BlocksSinceLastStep::<T>::mutate(netuid, |total| *total = total.saturating_add(1));

// Run the epoch if applicable.
if Self::should_run_epoch(netuid, current_block)
&& Self::is_epoch_input_state_consistent(netuid)
{
// Restart counters.
if !Self::should_run_epoch(netuid, current_block) {
continue;
}

// Per-block cap — defer if already at limit.
if epochs_run_this_block >= MAX_EPOCHS_PER_BLOCK {
let next_block = current_block.saturating_add(1);
PendingEpochAt::<T>::insert(netuid, next_block);
Self::deposit_event(Event::EpochDeferred {
netuid,
from_block: current_block,
to_block: next_block,
});
continue;
}

if Self::is_epoch_input_state_consistent(netuid) {
// Reset blocks-since counter; LastMechansimStepBlock is written
// post-distribute (see the caller), so bonds masking can read the
// previous successful run.
BlocksSinceLastStep::<T>::insert(netuid, 0);
LastMechansimStepBlock::<T>::insert(netuid, current_block);

// Get and drain the subnet pending emission.
let pending_server_alpha = PendingServerEmission::<T>::get(netuid);
Expand All @@ -357,7 +380,19 @@ impl<T: Config> Pallet<T> {
owner_cut,
),
);
epochs_run_this_block = epochs_run_this_block.saturating_add(1);
} else {
// Schedule advances below; execution skipped. Pending emissions accumulate
// and will be drained by the next successful epoch.
Self::deposit_event(Event::EpochSkippedDueToInconsistentState {
netuid,
block: current_block,
});
}

// Advance the schedule unconditionally — the slot is consumed.
LastEpochBlock::<T>::insert(netuid, current_block);
PendingEpochAt::<T>::insert(netuid, 0);
}
emissions_to_distribute
}
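A standalone sketch of the cap-and-defer behaviour above (`MAX_EPOCHS_PER_BLOCK = 2` as defined in lib.rs later in this diff; the split logic here is a simplified model, not the pallet code):

const MAX_EPOCHS_PER_BLOCK: u32 = 2;

/// Toy model: returns (netuids that run this block, (netuid, PendingEpochAt) deferrals).
fn split_due_epochs(due: &[u16], current_block: u64) -> (Vec<u16>, Vec<(u16, u64)>) {
    let mut run = Vec::new();
    let mut deferred = Vec::new();
    for &netuid in due {
        if (run.len() as u32) < MAX_EPOCHS_PER_BLOCK {
            run.push(netuid); // slot consumed: LastEpochBlock := current_block
        } else {
            deferred.push((netuid, current_block + 1)); // deferred one block forward
        }
    }
    (run, deferred)
}

fn main() {
    // Three subnets due in the same block: two run, the third fires next block.
    let (run, deferred) = split_due_epochs(&[3, 7, 11], 1_000);
    assert_eq!(run, vec![3, 7]);
    assert_eq!(deferred, vec![(11, 1_001)]);
}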
@@ -993,28 +1028,33 @@ impl<T: Config> Pallet<T> {
/// # Returns
/// * `bool` - True if the epoch should run, false otherwise.
pub fn should_run_epoch(netuid: NetUid, current_block: u64) -> bool {
Self::blocks_until_next_epoch(netuid, Self::get_tempo(netuid), current_block) == 0
let tempo = Self::get_tempo(netuid);
if tempo == 0 {
return false;
Contributor: This means that if tempo is set to 0, neither the manual trigger nor the MAX_TEMPO cap will fire. Is this by design?

Collaborator (Author): Yes, the previous implementation also meant that if tempo is 0, we don't run epochs on this subnet. We don't have this case on mainnet, but we support it; there is still a possibility for root to set the tempo to 0. So we keep the same semantics.

}
let pending = PendingEpochAt::<T>::get(netuid);
if pending > 0 && current_block >= pending {
return true;
}
if BlocksSinceLastStep::<T>::get(netuid) > MAX_TEMPO as u64 {
return true;
}
let last = LastEpochBlock::<T>::get(netuid);
let blocks_since = current_block.saturating_sub(last);
blocks_since > tempo as u64
}
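A worked timeline for the predicate above (standalone arithmetic; `AdminFreezeWindow = 10` is an assumed illustrative value):

fn main() {
    // Normal cadence: tempo 360, last slot consumed at block 639.
    let (tempo, last) = (360u64, 639u64);
    assert_eq!(last + tempo + 1, 1_000); // period is tempo + 1
    assert!(!(999u64.saturating_sub(last) > tempo)); // 360: not yet
    assert!(1_000u64.saturating_sub(last) > tempo); // 361 > 360: fires at 1_000
    // A manual trigger at block 700 with AdminFreezeWindow = 10 sets
    // PendingEpochAt = 710; the pending path fires at block 710 regardless of
    // cadence, the slot is consumed (LastEpochBlock := 710), and the next
    // automatic epoch moves to 710 + 361 = 1_071.
    assert_eq!(700u64 + 10, 710);
    assert_eq!(710u64 + tempo + 1, 1_071);
}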

/// Helper function which returns the number of blocks remaining before we will run the epoch on this
/// network. Networks run their epoch when (block_number + netuid + 1) % (tempo + 1) = 0
/// tempo | netuid | # first epoch block
///   1        0               0
///   1        1               1
///   2        0               1
///   2        1               0
///   100      0               99
///   100      1               98
/// Special case: tempo = 0, the network never runs.
///
/// Returns the number of blocks remaining before the next automatic epoch under the
/// stateful scheduler (period `tempo + 1`, anchored on `LastEpochBlock`). Used by the
/// admin-freeze-window predicate and external tooling. Returns `u64::MAX` when
/// `tempo == 0` (legacy defensive short-circuit).
pub fn blocks_until_next_epoch(netuid: NetUid, tempo: u16, block_number: u64) -> u64 {
Contributor: The comment "Period is tempo + 1: next firing at last + tempo + 1." and the code below are no longer correct.

Collaborator (Author): Updated the function name and the comment to avoid the confusion: ad7ba80

if tempo == 0 {
return u64::MAX;
}
let netuid_plus_one = (u16::from(netuid) as u64).saturating_add(1);
let tempo_plus_one = (tempo as u64).saturating_add(1);
let adjusted_block = block_number.wrapping_add(netuid_plus_one);
let remainder = adjusted_block.checked_rem(tempo_plus_one).unwrap_or(0);
(tempo as u64).saturating_sub(remainder)
let last = LastEpochBlock::<T>::get(netuid);
// Period is `tempo + 1`: next firing at `last + tempo + 1`.
let next_auto = last.saturating_add(tempo as u64).saturating_add(1);
next_auto.saturating_sub(block_number)
}
}
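For comparison, a standalone before/after model of the countdown — the old form staggered subnets by netuid via a modulus, while the new form is anchored on `LastEpochBlock`:

// Old: netuid-staggered modulus (cf. the removed doc table above).
fn blocks_until_next_epoch_old(netuid: u64, tempo: u64, block: u64) -> u64 {
    if tempo == 0 {
        return u64::MAX;
    }
    let remainder = block.wrapping_add(netuid + 1) % (tempo + 1);
    tempo - remainder
}

// New: anchored on the last consumed slot; next firing at last + tempo + 1.
fn blocks_until_next_epoch_new(last: u64, tempo: u64, block: u64) -> u64 {
    if tempo == 0 {
        return u64::MAX;
    }
    (last + tempo + 1).saturating_sub(block)
}

fn main() {
    // Old scheduler: tempo 100, netuid 1 first fires at block 98 (doc table row "100 1 98").
    assert_eq!(blocks_until_next_epoch_old(1, 100, 98), 0);
    // New scheduler: after a firing at block 98, the next is at 98 + 101 = 199.
    assert_eq!(blocks_until_next_epoch_new(98, 100, 100), 99);
}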
102 changes: 102 additions & 0 deletions pallets/subtensor/src/coinbase/tempo_control.rs
@@ -0,0 +1,102 @@
use super::*;
use crate::Error;
use frame_support::pallet_prelude::DispatchResult;
use sp_runtime::DispatchError;
use subtensor_runtime_common::NetUid;

use crate::system::pallet_prelude::OriginFor;
use crate::utils::rate_limiting::{Hyperparameter, TransactionType};

impl<T: Config> Pallet<T> {
/// Owner-side `set_tempo` implementation.
pub fn do_set_tempo(origin: OriginFor<T>, netuid: NetUid, tempo: u16) -> DispatchResult {
let who = Self::ensure_subnet_owner(origin, netuid)?;

ensure!(
(MIN_TEMPO..=MAX_TEMPO).contains(&tempo),
Error::<T>::TempoOutOfBounds
);

Self::ensure_admin_window_open(netuid)?;

let tx = TransactionType::TempoUpdate;
ensure!(
tx.passes_rate_limit_on_subnet::<T>(&who, netuid),
Error::<T>::TxRateLimitExceeded
);

let now = Self::get_current_block_as_u64();

Tempo::<T>::insert(netuid, tempo);
Contributor: Should this be a function, since Tempo and LastEpochBlock are always updated together?

Collaborator (Author): Added a helper function: df184e3

// Cycle reset on every successful set_tempo
LastEpochBlock::<T>::insert(netuid, now);

tx.set_last_block_on_subnet::<T>(&who, netuid, now);

Self::deposit_event(Event::TempoSet(netuid, tempo));
Ok(())
}

/// Owner-side `set_activity_cutoff_factor` implementation.
pub fn do_set_activity_cutoff_factor(
origin: OriginFor<T>,
netuid: NetUid,
factor_milli: u32,
) -> DispatchResult {
let who = Self::ensure_subnet_owner(origin, netuid)?;

ensure!(
(MIN_ACTIVITY_CUTOFF_FACTOR_MILLI..=MAX_ACTIVITY_CUTOFF_FACTOR_MILLI)
.contains(&factor_milli),
Error::<T>::ActivityCutoffFactorMilliOutOfBounds
);

Self::ensure_admin_window_open(netuid)?;

let tx = TransactionType::OwnerHyperparamUpdate(Hyperparameter::ActivityCutoffFactorMilli);
ensure!(
tx.passes_rate_limit_on_subnet::<T>(&who, netuid),
Error::<T>::TxRateLimitExceeded
);

let now = Self::get_current_block_as_u64();

Self::set_activity_cutoff_factor_milli(netuid, factor_milli);
tx.set_last_block_on_subnet::<T>(&who, netuid, now);

Ok(())
}

/// Owner-side `trigger_epoch` implementation.
/// Schedules the triggered epoch to fire after `AdminFreezeWindow` blocks; that
/// countdown engages the freeze window for the subnet via `is_in_admin_freeze_window`.
pub fn do_trigger_epoch(origin: OriginFor<T>, netuid: NetUid) -> Result<(), DispatchError> {
let who = Self::ensure_subnet_owner(origin, netuid)?;

// No `ensure_admin_window_open` here: trigger *defines* the next epoch.
ensure!(
PendingEpochAt::<T>::get(netuid) == 0,
Error::<T>::EpochTriggerAlreadyPending
);

let tx = TransactionType::OwnerHyperparamUpdate(Hyperparameter::TriggerEpoch);
ensure!(
tx.passes_rate_limit_on_subnet::<T>(&who, netuid),
Error::<T>::TxRateLimitExceeded
);

let now = Self::get_current_block_as_u64();
let window = AdminFreezeWindow::<T>::get() as u64;
let fires_at = now.saturating_add(window);

PendingEpochAt::<T>::insert(netuid, fires_at);
tx.set_last_block_on_subnet::<T>(&who, netuid, now);

Self::deposit_event(Event::EpochTriggered {
netuid,
by: who,
fires_at,
});
Ok(())
}
}
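`is_in_admin_freeze_window` itself is not shown in this diff; per the doc comments here and on `blocks_until_next_epoch`, a hypothetical shape of the predicate — every name and condition below is an assumption, not the pallet's actual code:

// Hypothetical sketch: a subnet is frozen when an epoch (automatic or
// manually triggered) is due within the next `window` blocks.
fn is_in_admin_freeze_window(blocks_until_next_epoch: u64, pending_epoch_at: u64, window: u64) -> bool {
    pending_epoch_at != 0 || blocks_until_next_epoch <= window
}

fn main() {
    // A trigger sets PendingEpochAt = now + window, so the subnet stays frozen
    // for the whole countdown; owner hyperparameter changes are rejected.
    assert!(is_in_admin_freeze_window(500, 1_010, 10));
    // Without a trigger, the window engages only near the scheduled epoch.
    assert!(!is_in_admin_freeze_window(200, 0, 10));
    assert!(is_in_admin_freeze_window(7, 0, 10));
}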
28 changes: 23 additions & 5 deletions pallets/subtensor/src/epoch/run_epoch.rs
@@ -169,7 +169,7 @@ impl<T: Config> Pallet<T> {
log::trace!("tempo: {tempo:?}");

// Get activity cutoff.
let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64;
let activity_cutoff: u64 = Self::get_activity_cutoff_blocks(netuid);
log::trace!("activity_cutoff: {activity_cutoff:?}");

// Last update vector.
@@ -205,7 +205,13 @@ impl<T: Config> Pallet<T> {
// Recently registered matrix, recently_ij=True if last_tempo was *before* j was last registered.
// Mask if: the last tempo block happened *before* the registration block
// ==> last_tempo <= registered
let last_tempo: u64 = current_block.saturating_sub(tempo);
// For dynamic tempo - we pick previous-successful-epoch block: `LastMechansimStepBlock + 1`
let lms = LastMechansimStepBlock::<T>::get(netuid);
let last_tempo: u64 = if lms == 0 {
current_block.saturating_sub(tempo)
} else {
lms.saturating_add(1)
};
let recently_registered: Vec<bool> = block_at_registration
.iter()
.map(|registered| last_tempo <= *registered)
@@ -595,7 +601,7 @@ impl<T: Config> Pallet<T> {
log::trace!("tempo:\n{tempo:?}\n");

// Get activity cutoff.
let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64;
let activity_cutoff: u64 = Self::get_activity_cutoff_blocks(netuid);
log::trace!("activity_cutoff: {activity_cutoff:?}");

// Last update vector.
@@ -819,7 +825,13 @@ impl<T: Config> Pallet<T> {
// Remove bonds referring to neurons that have registered since last tempo.
// Mask if: the last tempo block happened *before* the registration block
// ==> last_tempo <= registered
let last_tempo: u64 = current_block.saturating_sub(tempo);
// For dynamic tempo - we pick previous-successful-epoch block: `LastMechansimStepBlock + 1`
let lms = LastMechansimStepBlock::<T>::get(netuid);
let last_tempo: u64 = if lms == 0 {
current_block.saturating_sub(tempo)
} else {
lms.saturating_add(1)
};
bonds = scalar_vec_mask_sparse_matrix(
&bonds,
last_tempo,
@@ -859,7 +871,13 @@ impl<T: Config> Pallet<T> {
// Remove bonds referring to neurons that have registered since last tempo.
// Mask if: the last tempo block happened *before* the registration block
// ==> last_tempo <= registered
let last_tempo: u64 = current_block.saturating_sub(tempo);
// For dynamic tempo - we pick previous-successful-epoch block: `LastMechansimStepBlock + 1`
let lms = LastMechansimStepBlock::<T>::get(netuid);
let last_tempo: u64 = if lms == 0 {
current_block.saturating_sub(tempo)
} else {
lms.saturating_add(1)
};
bonds = scalar_vec_mask_sparse_matrix(
&bonds,
last_tempo,
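With a dynamic tempo, `current_block - tempo` no longer reliably lands at the previous epoch boundary, which is what the repeated `LastMechansimStepBlock + 1` anchor fixes. A toy check of the mask condition (`last_tempo <= registered` masks neurons that registered during or after the previous successful epoch):

// Toy model of the registration mask with the two anchors.
fn recently_registered(last_mech_step: u64, current_block: u64, tempo: u64, registered: u64) -> bool {
    let last_tempo = if last_mech_step == 0 {
        current_block.saturating_sub(tempo) // fallback: no successful run recorded
    } else {
        last_mech_step.saturating_add(1) // block right after the previous epoch
    };
    last_tempo <= registered
}

fn main() {
    // Previous successful epoch at block 900; tempo later raised, now at block 1_300.
    // The legacy anchor 1_300 - 360 = 940 would miss a registration at block 920.
    assert!(recently_registered(900, 1_300, 360, 920)); // caught by the new anchor
    assert!(!(1_300u64.saturating_sub(360) <= 920)); // missed by the legacy anchor
}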
40 changes: 40 additions & 0 deletions pallets/subtensor/src/lib.rs
@@ -1731,6 +1731,46 @@ pub mod pallet {
#[pallet::storage]
pub type Tempo<T> = StorageMap<_, Identity, NetUid, u16, ValueQuery, DefaultTempo<T>>;

/// Lower bound for owner-set tempo. Also the fixed cooldown for `set_tempo`.
pub const MIN_TEMPO: u16 = 360;
/// Upper bound for owner-set tempo (≈ 7 days at 12 s/block).
pub const MAX_TEMPO: u16 = 50_400;
/// Lower bound for activity-cutoff factor (per-mille). 1_000 = one full tempo.
pub const MIN_ACTIVITY_CUTOFF_FACTOR_MILLI: u32 = 1_000;
/// Upper bound for activity-cutoff factor (per-mille). 50_000 = 50 tempos.
pub const MAX_ACTIVITY_CUTOFF_FACTOR_MILLI: u32 = 50_000;
/// Default activity-cutoff factor (per-mille). `13_889 = ceil(5_000 * 1_000 / 360)`,
/// chosen by ceiling rounding so that the legacy 5000-block cutoff is reproduced
/// exactly at the default tempo 360 (`13_889 * 360 / 1000 = 5_000` in integer division).
pub const INITIAL_ACTIVITY_CUTOFF_FACTOR_MILLI: u32 = 13_889;
/// Per-block cap on number of epochs that may execute in a single `block_step`.
pub const MAX_EPOCHS_PER_BLOCK: u32 = 2;

/// Default value for activity-cutoff factor (per-mille).
#[pallet::type_value]
pub fn DefaultActivityCutoffFactorMilli<T: Config>() -> u32 {
INITIAL_ACTIVITY_CUTOFF_FACTOR_MILLI
}

/// --- MAP ( netuid ) --> last epoch attempt block (consumed slot).
/// Drives normal-cadence scheduling and the admin freeze window.
/// Advances on every `should_run_epoch == true` slot — including consistency-skipped slots —
/// and on a successful `set_tempo` (cycle reset).
#[pallet::storage]
pub type LastEpochBlock<T> =
StorageMap<_, Identity, NetUid, u64, ValueQuery, DefaultZeroU64<T>>;

/// --- MAP ( netuid ) --> block at which a manually triggered epoch should fire.
/// `0` means no trigger pending. Cleared after the triggered epoch runs.
#[pallet::storage]
pub type PendingEpochAt<T> =
StorageMap<_, Identity, NetUid, u64, ValueQuery, DefaultZeroU64<T>>;

/// --- MAP ( netuid ) --> activity-cutoff factor in per-mille epochs (1/1000 granularity).
/// Effective cutoff in blocks = `(factor × tempo) / 1000`, clamped to ≥ 1.
#[pallet::storage]
pub type ActivityCutoffFactorMilli<T> =
StorageMap<_, Identity, NetUid, u32, ValueQuery, DefaultActivityCutoffFactorMilli<T>>;

/// ============================
/// ==== Subnet Parameters =====
/// ============================
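A standalone check of the cutoff arithmetic documented above (`get_activity_cutoff_blocks`, used in run_epoch.rs earlier in this diff, presumably computes the equivalent):

// Effective cutoff in blocks = (factor_milli * tempo) / 1000, clamped to >= 1.
fn activity_cutoff_blocks(factor_milli: u64, tempo: u64) -> u64 {
    (factor_milli.saturating_mul(tempo) / 1_000).max(1)
}

fn main() {
    // The default factor reproduces the legacy 5000-block cutoff at tempo 360.
    assert_eq!(activity_cutoff_blocks(13_889, 360), 5_000);
    // The minimum factor (1_000 per-mille) means exactly one tempo.
    assert_eq!(activity_cutoff_blocks(1_000, 360), 360);
    // The clamp keeps the cutoff at least one block even for degenerate tempos.
    assert_eq!(activity_cutoff_blocks(1_000, 0), 1);
}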