-
Notifications
You must be signed in to change notification settings - Fork 313
[Feature] Configurable Tempo & Owner-Triggered Epochs #2638
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: devnet-ready
Are you sure you want to change the base?
Changes from 3 commits
d30a76f
42b3e29
c6567e2
ad7ba80
df184e3
b2e4658
02f43ee
40f2773
c3fa417
dca9f44
73d1ab4
1ba4a3d
215f13b
577d62b
ff61eb3
e15d749
7a1d7a3
4c06820
82ae882
bcfe012
34bcc41
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -6,3 +6,4 @@ pub mod root; | |
| pub mod run_coinbase; | ||
| pub mod subnet_emissions; | ||
| pub mod tao; | ||
| pub mod tempo_control; | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -64,7 +64,14 @@ impl<T: Config> Pallet<T> { | |
| let emissions_to_distribute = Self::drain_pending(&subnets, current_block); | ||
|
|
||
| // --- 6. Distribute the emissions to the subnets. | ||
| // Bonds masking inside `distribute_emission` reads `LastMechansimStepBlock` and | ||
| // must see the previous successful run, so we delay the write until after. | ||
| Self::distribute_emissions_to_subnets(&emissions_to_distribute); | ||
|
|
||
| // --- 7. Mark each successful epoch run as the last mechanism step. | ||
| for netuid in emissions_to_distribute.keys() { | ||
| LastMechansimStepBlock::<T>::insert(*netuid, current_block); | ||
| } | ||
| } | ||
|
|
||
| pub fn inject_and_maybe_swap( | ||
|
|
@@ -318,19 +325,35 @@ impl<T: Config> Pallet<T> { | |
| NetUid, | ||
| (AlphaBalance, AlphaBalance, AlphaBalance, AlphaBalance), | ||
| > = BTreeMap::new(); | ||
| // --- Drain pending emissions for all subnets that are at their tempo. | ||
| // Run the epoch for *all* subnets, even if we don't emit anything. | ||
| // Per-block cap on number of epochs that may run; the rest are deferred 1 block forward | ||
| // by setting `PendingEpochAt`. | ||
| let mut epochs_run_this_block: u32 = 0; | ||
|
|
||
| for &netuid in subnets.iter() { | ||
| // Increment blocks since last step. | ||
| // Increment blocks since last *successful* step (existing semantics). | ||
| BlocksSinceLastStep::<T>::mutate(netuid, |total| *total = total.saturating_add(1)); | ||
|
|
||
| // Run the epoch if applicable. | ||
| if Self::should_run_epoch(netuid, current_block) | ||
| && Self::is_epoch_input_state_consistent(netuid) | ||
| { | ||
| // Restart counters. | ||
| if !Self::should_run_epoch(netuid, current_block) { | ||
| continue; | ||
| } | ||
|
|
||
| // Per-block cap — defer if already at limit. | ||
| if epochs_run_this_block >= MAX_EPOCHS_PER_BLOCK { | ||
| let next_block = current_block.saturating_add(1); | ||
| PendingEpochAt::<T>::insert(netuid, next_block); | ||
| Self::deposit_event(Event::EpochDeferred { | ||
| netuid, | ||
| from_block: current_block, | ||
| to_block: next_block, | ||
| }); | ||
| continue; | ||
| } | ||
|
|
||
| if Self::is_epoch_input_state_consistent(netuid) { | ||
| // Reset blocks-since counter; LastMechansimStepBlock is written | ||
| // post-distribute (see the caller), so bonds masking can read the | ||
| // previous successful run. | ||
| BlocksSinceLastStep::<T>::insert(netuid, 0); | ||
| LastMechansimStepBlock::<T>::insert(netuid, current_block); | ||
|
|
||
| // Get and drain the subnet pending emission. | ||
| let pending_server_alpha = PendingServerEmission::<T>::get(netuid); | ||
|
|
@@ -357,7 +380,19 @@ impl<T: Config> Pallet<T> { | |
| owner_cut, | ||
| ), | ||
| ); | ||
| epochs_run_this_block = epochs_run_this_block.saturating_add(1); | ||
| } else { | ||
| // Schedule advances below; execution skipped. Pending emissions accumulate | ||
| // and will be drained by the next successful epoch. | ||
| Self::deposit_event(Event::EpochSkippedDueToInconsistentState { | ||
| netuid, | ||
| block: current_block, | ||
| }); | ||
| } | ||
|
|
||
| // Advance the schedule unconditionally — the slot is consumed. | ||
| LastEpochBlock::<T>::insert(netuid, current_block); | ||
| PendingEpochAt::<T>::insert(netuid, 0); | ||
| } | ||
| emissions_to_distribute | ||
| } | ||
|
|
@@ -993,28 +1028,33 @@ impl<T: Config> Pallet<T> { | |
| /// # Returns | ||
| /// * `bool` - True if the epoch should run, false otherwise. | ||
| pub fn should_run_epoch(netuid: NetUid, current_block: u64) -> bool { | ||
| Self::blocks_until_next_epoch(netuid, Self::get_tempo(netuid), current_block) == 0 | ||
| let tempo = Self::get_tempo(netuid); | ||
| if tempo == 0 { | ||
| return false; | ||
| } | ||
| let pending = PendingEpochAt::<T>::get(netuid); | ||
| if pending > 0 && current_block >= pending { | ||
| return true; | ||
| } | ||
| if BlocksSinceLastStep::<T>::get(netuid) > MAX_TEMPO as u64 { | ||
| return true; | ||
| } | ||
| let last = LastEpochBlock::<T>::get(netuid); | ||
| let blocks_since = current_block.saturating_sub(last); | ||
| blocks_since > tempo as u64 | ||
| } | ||
|
|
||
| /// Helper function which returns the number of blocks remaining before we will run the epoch on this | ||
| /// network. Networks run their epoch when (block_number + netuid + 1 ) % (tempo + 1) = 0 | ||
| /// tempo | netuid | # first epoch block | ||
| /// 1 0 0 | ||
| /// 1 1 1 | ||
| /// 2 0 1 | ||
| /// 2 1 0 | ||
| /// 100 0 99 | ||
| /// 100 1 98 | ||
| /// Special case: tempo = 0, the network never runs. | ||
| /// | ||
| /// Returns the number of blocks remaining before the next automatic epoch under the | ||
| /// stateful scheduler (period `tempo + 1`, anchored on `LastEpochBlock`). Used by the | ||
| /// admin-freeze-window predicate and external tooling. Returns `u64::MAX` when | ||
| /// `tempo == 0` (legacy defensive short-circuit). | ||
| pub fn blocks_until_next_epoch(netuid: NetUid, tempo: u16, block_number: u64) -> u64 { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. "Period is
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Updated the function name and the comment to avoid the confusion: ad7ba80 |
||
| if tempo == 0 { | ||
| return u64::MAX; | ||
| } | ||
| let netuid_plus_one = (u16::from(netuid) as u64).saturating_add(1); | ||
| let tempo_plus_one = (tempo as u64).saturating_add(1); | ||
| let adjusted_block = block_number.wrapping_add(netuid_plus_one); | ||
| let remainder = adjusted_block.checked_rem(tempo_plus_one).unwrap_or(0); | ||
| (tempo as u64).saturating_sub(remainder) | ||
| let last = LastEpochBlock::<T>::get(netuid); | ||
| // Period is `tempo + 1`: next firing at `last + tempo + 1`. | ||
| let next_auto = last.saturating_add(tempo as u64).saturating_add(1); | ||
| next_auto.saturating_sub(block_number) | ||
| } | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,102 @@ | ||
| use super::*; | ||
| use crate::Error; | ||
| use frame_support::pallet_prelude::DispatchResult; | ||
| use sp_runtime::DispatchError; | ||
| use subtensor_runtime_common::NetUid; | ||
|
|
||
| use crate::system::pallet_prelude::OriginFor; | ||
| use crate::utils::rate_limiting::{Hyperparameter, TransactionType}; | ||
|
|
||
| impl<T: Config> Pallet<T> { | ||
| /// Owner-side `set_tempo` implementation. | ||
| pub fn do_set_tempo(origin: OriginFor<T>, netuid: NetUid, tempo: u16) -> DispatchResult { | ||
| let who = Self::ensure_subnet_owner(origin, netuid)?; | ||
|
|
||
| ensure!( | ||
| (MIN_TEMPO..=MAX_TEMPO).contains(&tempo), | ||
| Error::<T>::TempoOutOfBounds | ||
| ); | ||
|
|
||
| Self::ensure_admin_window_open(netuid)?; | ||
|
|
||
| let tx = TransactionType::TempoUpdate; | ||
| ensure!( | ||
| tx.passes_rate_limit_on_subnet::<T>(&who, netuid), | ||
| Error::<T>::TxRateLimitExceeded | ||
| ); | ||
|
|
||
| let now = Self::get_current_block_as_u64(); | ||
|
|
||
| Tempo::<T>::insert(netuid, tempo); | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Should this be a function since Tempo and LastEpochBlock are always updated together?
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Added a helper function: df184e3 |
||
| // Cycle reset on every successful set_tempo | ||
| LastEpochBlock::<T>::insert(netuid, now); | ||
|
|
||
| tx.set_last_block_on_subnet::<T>(&who, netuid, now); | ||
|
|
||
| Self::deposit_event(Event::TempoSet(netuid, tempo)); | ||
| Ok(()) | ||
| } | ||
|
|
||
| /// Owner-side `set_activity_cutoff_factor` implementation. | ||
| pub fn do_set_activity_cutoff_factor( | ||
| origin: OriginFor<T>, | ||
| netuid: NetUid, | ||
| factor_milli: u32, | ||
| ) -> DispatchResult { | ||
| let who = Self::ensure_subnet_owner(origin, netuid)?; | ||
|
|
||
| ensure!( | ||
| (MIN_ACTIVITY_CUTOFF_FACTOR_MILLI..=MAX_ACTIVITY_CUTOFF_FACTOR_MILLI) | ||
| .contains(&factor_milli), | ||
| Error::<T>::ActivityCutoffFactorMilliOutOfBounds | ||
| ); | ||
|
|
||
| Self::ensure_admin_window_open(netuid)?; | ||
|
|
||
| let tx = TransactionType::OwnerHyperparamUpdate(Hyperparameter::ActivityCutoffFactorMilli); | ||
| ensure!( | ||
| tx.passes_rate_limit_on_subnet::<T>(&who, netuid), | ||
| Error::<T>::TxRateLimitExceeded | ||
| ); | ||
|
|
||
| let now = Self::get_current_block_as_u64(); | ||
|
|
||
| Self::set_activity_cutoff_factor_milli(netuid, factor_milli); | ||
| tx.set_last_block_on_subnet::<T>(&who, netuid, now); | ||
|
|
||
| Ok(()) | ||
| } | ||
|
|
||
| /// Owner-side `trigger_epoch` implementation. | ||
| /// Schedules the triggered epoch to fire after `AdminFreezeWindow` blocks; that | ||
| /// countdown engages the freeze window for the subnet via `is_in_admin_freeze_window`. | ||
| pub fn do_trigger_epoch(origin: OriginFor<T>, netuid: NetUid) -> Result<(), DispatchError> { | ||
| let who = Self::ensure_subnet_owner(origin, netuid)?; | ||
|
|
||
| // No `ensure_admin_window_open` here: trigger *defines* the next epoch. | ||
| ensure!( | ||
| PendingEpochAt::<T>::get(netuid) == 0, | ||
| Error::<T>::EpochTriggerAlreadyPending | ||
| ); | ||
|
|
||
| let tx = TransactionType::OwnerHyperparamUpdate(Hyperparameter::TriggerEpoch); | ||
| ensure!( | ||
| tx.passes_rate_limit_on_subnet::<T>(&who, netuid), | ||
| Error::<T>::TxRateLimitExceeded | ||
| ); | ||
|
|
||
| let now = Self::get_current_block_as_u64(); | ||
| let window = AdminFreezeWindow::<T>::get() as u64; | ||
| let fires_at = now.saturating_add(window); | ||
|
|
||
| PendingEpochAt::<T>::insert(netuid, fires_at); | ||
| tx.set_last_block_on_subnet::<T>(&who, netuid, now); | ||
|
|
||
| Self::deposit_event(Event::EpochTriggered { | ||
| netuid, | ||
| by: who, | ||
| fires_at, | ||
| }); | ||
| Ok(()) | ||
| } | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This means that if tempo is set to 0, neither the manual trigger nor the MAX_TEMPO cap will work. Is this by design?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yes, the previous implementation meant that if tempo is 0, we don't run epochs on this subnet.
We don't have this case in Mainnet, but we support it. There is still a possibility to set the tempo to 0 by the root.
So we support the same semantics.