diff --git a/.gitignore b/.gitignore index 1df31f05..682252de 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,5 @@ coverage mainnet/**/outputs/* mainnet/outputs/* +._* +*-wasm-memory \ No newline at end of file diff --git a/patches/2025-10-07-hb-balances-patch.lua b/patches/2025-10-07-hb-balances-patch.lua new file mode 100644 index 00000000..96838768 --- /dev/null +++ b/patches/2025-10-07-hb-balances-patch.lua @@ -0,0 +1,2873 @@ +--[[ + Adds HB patch message sending for when balances are modified. + + - load code in + - Initialize HB via ao.send({device = "patch@1.0", balances = { device = "trie@1.0" } }) + + - After, in a separate message, can call the Patch-Hyperbeam-Balances action to send the balances and hydrate the HB state + + Reviewers: Ariel, Atticus +]] + +-- Copied from aos-bundled.lua after building +-- module: ".src.hb" +local function _loaded_mod_src_hb() + -- hb.lua needs to be in its own file and not in balances.lua to avoid circular dependencies + local hb = {} + + ---@param oldBalances table A table of addresses and their balances + ---@return table affectedBalancesAddresses table of addresses that have had balance changes + function hb.patchBalances(oldBalances) + assert(type(oldBalances) == "table", "Old balances must be a table") + local affectedBalancesAddresses = {} + for address, _ in pairs(oldBalances) do + if Balances[address] ~= oldBalances[address] then + affectedBalancesAddresses[address] = true + end + end + for address, _ in pairs(Balances) do + if oldBalances[address] ~= Balances[address] then + affectedBalancesAddresses[address] = true + end + end + + --- For simplicity we always include the protocol balance in the patch message + --- this also prevents us from sending an empty patch message and deleting the entire hyperbeam balances table + + local patchMessage = { + device = "patch@1.0", + balances = { [ao.id] = tostring(Balances[ao.id] or 0) }, + } + for address, _ in pairs(affectedBalancesAddresses) do + 
patchMessage.balances[address] = tostring(Balances[address] or 0) + end + + -- only send the patch message if there are affected balances, otherwise we'll end up deleting the entire hyperbeam balances table + if next(patchMessage.balances) == nil then + return {} + else + ao.send(patchMessage) + end + + return affectedBalancesAddresses + end + + return hb +end + +_G.package.loaded[".src.hb"] = _loaded_mod_src_hb() + +-- copied from aos-bundled.lua after building +-- module: ".src.main" +local function _loaded_mod_src_main() + local main = {} + local constants = require(".src.constants") + local token = require(".src.token") + local utils = require(".src.utils") + local json = require(".src.json") + local balances = require(".src.balances") + local hb = require(".src.hb") + local arns = require(".src.arns") + local gar = require(".src.gar") + local demand = require(".src.demand") + local epochs = require(".src.epochs") + local vaults = require(".src.vaults") + local prune = require(".src.prune") + local tick = require(".src.tick") + local primaryNames = require(".src.primary_names") + local ARIOEvent = require(".src.ario_event") + + -- handlers that are critical should discard the memory on error (see prune for an example) + local CRITICAL = true + + local ActionMap = { + -- reads + Info = "Info", + TotalSupply = "Total-Supply", -- for token.lua spec compatibility, gives just the total supply (circulating + locked + staked + delegated + withdraw) + TotalTokenSupply = "Total-Token-Supply", -- gives the total token supply and all components (protocol balance, locked supply, staked supply, delegated supply, and withdraw supply) + Transfer = "Transfer", + Balance = "Balance", + Balances = "Balances", + DemandFactor = "Demand-Factor", + DemandFactorInfo = "Demand-Factor-Info", + DemandFactorSettings = "Demand-Factor-Settings", + -- EPOCH READ APIS + Epoch = "Epoch", + EpochSettings = "Epoch-Settings", + PrescribedObservers = "Epoch-Prescribed-Observers", + 
PrescribedNames = "Epoch-Prescribed-Names", + Observations = "Epoch-Observations", + Distributions = "Epoch-Distributions", + EpochRewards = "Epoch-Eligible-Rewards", + --- Vaults + Vault = "Vault", + Vaults = "Vaults", + CreateVault = "Create-Vault", + VaultedTransfer = "Vaulted-Transfer", + ExtendVault = "Extend-Vault", + IncreaseVault = "Increase-Vault", + RevokeVault = "Revoke-Vault", + -- GATEWAY REGISTRY READ APIS + Gateway = "Gateway", + Gateways = "Gateways", + GatewayRegistrySettings = "Gateway-Registry-Settings", + Delegates = "Delegates", + JoinNetwork = "Join-Network", + LeaveNetwork = "Leave-Network", + IncreaseOperatorStake = "Increase-Operator-Stake", + DecreaseOperatorStake = "Decrease-Operator-Stake", + UpdateGatewaySettings = "Update-Gateway-Settings", + SaveObservations = "Save-Observations", + DelegateStake = "Delegate-Stake", + RedelegateStake = "Redelegate-Stake", + DecreaseDelegateStake = "Decrease-Delegate-Stake", + CancelWithdrawal = "Cancel-Withdrawal", + InstantWithdrawal = "Instant-Withdrawal", + RedelegationFee = "Redelegation-Fee", + AllPaginatedDelegates = "All-Paginated-Delegates", + AllGatewayVaults = "All-Gateway-Vaults", + --- ArNS + Record = "Record", + Records = "Records", + BuyName = "Buy-Name", + UpgradeName = "Upgrade-Name", + ExtendLease = "Extend-Lease", + IncreaseUndernameLimit = "Increase-Undername-Limit", + ReassignName = "Reassign-Name", + ReleaseName = "Release-Name", + ReservedNames = "Reserved-Names", + ReservedName = "Reserved-Name", + TokenCost = "Token-Cost", + CostDetails = "Cost-Details", + RegistrationFees = "Registration-Fees", + ReturnedNames = "Returned-Names", + ReturnedName = "Returned-Name", + AllowDelegates = "Allow-Delegates", + DisallowDelegates = "Disallow-Delegates", + Delegations = "Delegations", + -- PRIMARY NAMES + RemovePrimaryNames = "Remove-Primary-Names", + RequestPrimaryName = "Request-Primary-Name", + PrimaryNameRequest = "Primary-Name-Request", + PrimaryNameRequests = 
"Primary-Name-Requests", + ApprovePrimaryNameRequest = "Approve-Primary-Name-Request", + PrimaryNames = "Primary-Names", + PrimaryName = "Primary-Name", + -- Hyperbeam Patch Balances + PatchHyperbeamBalances = "Patch-Hyperbeam-Balances", + } + + --- @param msg ParsedMessage + --- @param response any + local function Send(msg, response) + if msg.reply then + --- Reference: https://github.com/permaweb/aos/blob/main/blueprints/patch-legacy-reply.lua + msg.reply(response) + else + ao.send(response) + end + end + + local function eventingPcall(ioEvent, onError, fnToCall, ...) + local status, result = pcall(fnToCall, ...) + if not status then + onError(result) + ioEvent:addField("Error", result) + return status, result + end + return status, result + end + + --- @param fundingPlan FundingPlan|nil + --- @param rewardForInitiator number|nil only applies in buy record for returned names + local function adjustSuppliesForFundingPlan(fundingPlan, rewardForInitiator) + if not fundingPlan then + return + end + rewardForInitiator = rewardForInitiator or 0 + local totalActiveStakesUsed = utils.reduce(fundingPlan.stakes, function(acc, _, stakeSpendingPlan) + return acc + stakeSpendingPlan.delegatedStake + end, 0) + local totalWithdrawStakesUsed = utils.reduce(fundingPlan.stakes, function(acc, _, stakeSpendingPlan) + return acc + + utils.reduce(stakeSpendingPlan.vaults, function(acc2, _, vaultBalance) + return acc2 + vaultBalance + end, 0) + end, 0) + LastKnownStakedSupply = LastKnownStakedSupply - totalActiveStakesUsed + LastKnownWithdrawSupply = LastKnownWithdrawSupply - totalWithdrawStakesUsed + LastKnownCirculatingSupply = LastKnownCirculatingSupply - fundingPlan.balance + rewardForInitiator + end + + --- @param ioEvent ARIOEvent + --- @param result BuyNameResult|RecordInteractionResult|CreatePrimaryNameResult|PrimaryNameRequestApproval + local function addResultFundingPlanFields(ioEvent, result) + ioEvent:addFieldsWithPrefixIfExist(result.fundingPlan, "FP-", { 
"balance" }) + local fundingPlanVaultsCount = 0 + local fundingPlanStakesAmount = utils.reduce( + result.fundingPlan and result.fundingPlan.stakes or {}, + function(acc, _, delegation) + return acc + + delegation.delegatedStake + + utils.reduce(delegation.vaults, function(acc2, _, vaultAmount) + fundingPlanVaultsCount = fundingPlanVaultsCount + 1 + return acc2 + vaultAmount + end, 0) + end, + 0 + ) + if fundingPlanStakesAmount > 0 then + ioEvent:addField("FP-Stakes-Amount", fundingPlanStakesAmount) + end + if fundingPlanVaultsCount > 0 then + ioEvent:addField("FP-Vaults-Count", fundingPlanVaultsCount) + end + local newWithdrawVaultsTallies = utils.reduce( + result.fundingResult and result.fundingResult.newWithdrawVaults or {}, + function(acc, _, newWithdrawVault) + acc.totalBalance = acc.totalBalance + + utils.reduce(newWithdrawVault, function(acc2, _, vault) + acc.count = acc.count + 1 + return acc2 + vault.balance + end, 0) + return acc + end, + { count = 0, totalBalance = 0 } + ) + if newWithdrawVaultsTallies.count > 0 then + ioEvent:addField("New-Withdraw-Vaults-Count", newWithdrawVaultsTallies.count) + ioEvent:addField("New-Withdraw-Vaults-Total-Balance", newWithdrawVaultsTallies.totalBalance) + end + adjustSuppliesForFundingPlan(result.fundingPlan, result.returnedName and result.returnedName.rewardForInitiator) + end + + --- @param ioEvent ARIOEvent + ---@param result RecordInteractionResult|BuyNameResult + local function addRecordResultFields(ioEvent, result) + ioEvent:addFieldsIfExist(result, { + "baseRegistrationFee", + "remainingBalance", + "protocolBalance", + "recordsCount", + "reservedRecordsCount", + "totalFee", + }) + ioEvent:addFieldsIfExist(result.record, { "startTimestamp", "endTimestamp", "undernameLimit", "purchasePrice" }) + if result.df ~= nil and type(result.df) == "table" then + ioEvent:addField("DF-Trailing-Period-Purchases", (result.df.trailingPeriodPurchases or {})) + ioEvent:addField("DF-Trailing-Period-Revenues", 
(result.df.trailingPeriodRevenues or {})) + ioEvent:addFieldsWithPrefixIfExist(result.df, "DF-", { + "currentPeriod", + "currentDemandFactor", + "consecutivePeriodsWithMinDemandFactor", + "revenueThisPeriod", + "purchasesThisPeriod", + }) + end + addResultFundingPlanFields(ioEvent, result) + end + + local function addReturnedNameResultFields(ioEvent, result) + ioEvent:addFieldsIfExist(result, { + "rewardForInitiator", + "rewardForProtocol", + "type", + "years", + }) + ioEvent:addFieldsIfExist(result.record, { "startTimestamp", "endTimestamp", "undernameLimit", "purchasePrice" }) + ioEvent:addFieldsIfExist(result.returnedName, { + "name", + "initiator", + "startTimestamp", + }) + -- TODO: add removedPrimaryNamesAndOwners to ioEvent + addResultFundingPlanFields(ioEvent, result) + end + + --- @class SupplyData + --- @field circulatingSupply number|nil + --- @field lockedSupply number|nil + --- @field stakedSupply number|nil + --- @field delegatedSupply number|nil + --- @field withdrawSupply number|nil + --- @field totalTokenSupply number|nil + --- @field protocolBalance number|nil + + --- @param ioEvent ARIOEvent + --- @param supplyData SupplyData|nil + local function addSupplyData(ioEvent, supplyData) + supplyData = supplyData or {} + ioEvent:addField("Circulating-Supply", supplyData.circulatingSupply or LastKnownCirculatingSupply) + ioEvent:addField("Locked-Supply", supplyData.lockedSupply or LastKnownLockedSupply) + ioEvent:addField("Staked-Supply", supplyData.stakedSupply or LastKnownStakedSupply) + ioEvent:addField("Delegated-Supply", supplyData.delegatedSupply or LastKnownDelegatedSupply) + ioEvent:addField("Withdraw-Supply", supplyData.withdrawSupply or LastKnownWithdrawSupply) + ioEvent:addField("Total-Token-Supply", supplyData.totalTokenSupply or token.lastKnownTotalTokenSupply()) + ioEvent:addField("Protocol-Balance", Balances[ao.id]) + end + + --- @param ioEvent ARIOEvent + --- @param talliesData StateObjectTallies|GatewayObjectTallies|nil + local function 
addTalliesData(ioEvent, talliesData) + ioEvent:addFieldsIfExist(talliesData, { + "numAddressesVaulting", + "numBalanceVaults", + "numBalances", + "numDelegateVaults", + "numDelegatesVaulting", + "numDelegates", + "numDelegations", + "numExitingDelegations", + "numGatewayVaults", + "numGatewaysVaulting", + "numGateways", + "numExitingGateways", + }) + end + + local function gatewayStats() + local numJoinedGateways = 0 + local numLeavingGateways = 0 + for _, gateway in pairs(GatewayRegistry) do + if gateway.status == "joined" then + numJoinedGateways = numJoinedGateways + 1 + else + numLeavingGateways = numLeavingGateways + 1 + end + end + return { + joined = numJoinedGateways, + leaving = numLeavingGateways, + } + end + + --- @param ioEvent ARIOEvent + --- @param pruneGatewaysResult PruneGatewaysResult + local function addPruneGatewaysResult(ioEvent, pruneGatewaysResult) + LastKnownCirculatingSupply = LastKnownCirculatingSupply + + (pruneGatewaysResult.delegateStakeReturned or 0) + + (pruneGatewaysResult.gatewayStakeReturned or 0) + + LastKnownWithdrawSupply = LastKnownWithdrawSupply + - (pruneGatewaysResult.delegateStakeReturned or 0) + - (pruneGatewaysResult.gatewayStakeReturned or 0) + + (pruneGatewaysResult.delegateStakeWithdrawing or 0) + + (pruneGatewaysResult.gatewayStakeWithdrawing or 0) + + LastKnownDelegatedSupply = LastKnownDelegatedSupply - (pruneGatewaysResult.delegateStakeWithdrawing or 0) + + local totalGwStakesSlashed = (pruneGatewaysResult.stakeSlashed or 0) + LastKnownStakedSupply = LastKnownStakedSupply + - totalGwStakesSlashed + - (pruneGatewaysResult.gatewayStakeWithdrawing or 0) + + if totalGwStakesSlashed > 0 then + ioEvent:addField("Total-Gateways-Stake-Slashed", totalGwStakesSlashed) + end + + local prunedGateways = pruneGatewaysResult.prunedGateways or {} + local prunedGatewaysCount = utils.lengthOfTable(prunedGateways) + if prunedGatewaysCount > 0 then + ioEvent:addField("Pruned-Gateways", prunedGateways) + 
ioEvent:addField("Pruned-Gateways-Count", prunedGatewaysCount) + local gwStats = gatewayStats() + ioEvent:addField("Joined-Gateways-Count", gwStats.joined) + ioEvent:addField("Leaving-Gateways-Count", gwStats.leaving) + end + + local slashedGateways = pruneGatewaysResult.slashedGateways or {} + local slashedGatewaysCount = utils.lengthOfTable(slashedGateways or {}) + if slashedGatewaysCount > 0 then + ioEvent:addField("Slashed-Gateway-Amounts", slashedGateways) + ioEvent:addField("Slashed-Gateways-Count", slashedGatewaysCount) + local invariantSlashedGateways = {} + for gwAddress, _ in pairs(slashedGateways) do + local unsafeGateway = gar.getGatewayUnsafe(gwAddress) or {} + if unsafeGateway and (unsafeGateway.totalDelegatedStake > 0) then + invariantSlashedGateways[gwAddress] = unsafeGateway.totalDelegatedStake + end + end + if utils.lengthOfTable(invariantSlashedGateways) > 0 then + ioEvent:addField("Invariant-Slashed-Gateways", invariantSlashedGateways) + end + end + + addTalliesData(ioEvent, pruneGatewaysResult.gatewayObjectTallies) + end + + --- @param ioEvent ARIOEvent + local function addNextPruneTimestampsData(ioEvent) + ioEvent:addField("Next-Returned-Names-Prune-Timestamp", arns.nextReturnedNamesPruneTimestamp()) + ioEvent:addField("Next-Records-Prune-Timestamp", arns.nextRecordsPruneTimestamp()) + ioEvent:addField("Next-Vaults-Prune-Timestamp", vaults.nextVaultsPruneTimestamp()) + ioEvent:addField("Next-Gateways-Prune-Timestamp", gar.nextGatewaysPruneTimestamp()) + ioEvent:addField("Next-Redelegations-Prune-Timestamp", gar.nextRedelegationsPruneTimestamp()) + ioEvent:addField("Next-Primary-Names-Prune-Timestamp", primaryNames.nextPrimaryNamesPruneTimestamp()) + end + + --- @param ioEvent ARIOEvent + --- @param prunedStateResult PruneStateResult + local function addNextPruneTimestampsResults(ioEvent, prunedStateResult) + --- @type PruneGatewaysResult + local pruneGatewaysResult = prunedStateResult.pruneGatewaysResult + + -- If anything meaningful was 
pruned, collect the next prune timestamps + if + next(prunedStateResult.prunedReturnedNames) + or next(prunedStateResult.prunedPrimaryNameRequests) + or next(prunedStateResult.prunedRecords) + or next(pruneGatewaysResult.prunedGateways) + or next(prunedStateResult.delegatorsWithFeeReset) + or next(pruneGatewaysResult.slashedGateways) + or pruneGatewaysResult.delegateStakeReturned > 0 + or pruneGatewaysResult.gatewayStakeReturned > 0 + or pruneGatewaysResult.delegateStakeWithdrawing > 0 + or pruneGatewaysResult.gatewayStakeWithdrawing > 0 + or pruneGatewaysResult.stakeSlashed > 0 + then + addNextPruneTimestampsData(ioEvent) + end + end + + local function assertValidFundFrom(fundFrom) + if fundFrom == nil then + return + end + local validFundFrom = utils.createLookupTable({ "any", "balance", "stakes" }) + assert(validFundFrom[fundFrom], "Invalid fund from type. Must be one of: any, balance, stakes") + end + + --- @param ioEvent ARIOEvent + local function addPrimaryNameCounts(ioEvent) + ioEvent:addField("Total-Primary-Names", utils.lengthOfTable(primaryNames.getUnsafePrimaryNames())) + ioEvent:addField( + "Total-Primary-Name-Requests", + utils.lengthOfTable(primaryNames.getUnsafePrimaryNameRequests()) + ) + end + + --- @param ioEvent ARIOEvent + --- @param primaryNameResult CreatePrimaryNameResult|PrimaryNameRequestApproval + local function addPrimaryNameRequestData(ioEvent, primaryNameResult) + ioEvent:addFieldsIfExist(primaryNameResult, { "baseNameOwner" }) + ioEvent:addFieldsIfExist(primaryNameResult.newPrimaryName, { "owner", "startTimestamp" }) + ioEvent:addFieldsWithPrefixIfExist(primaryNameResult.request, "Request-", { "startTimestamp", "endTimestamp" }) + addResultFundingPlanFields(ioEvent, primaryNameResult) + addPrimaryNameCounts(ioEvent) + + -- demand factor data + if primaryNameResult.demandFactor and type(primaryNameResult.demandFactor) == "table" then + ioEvent:addField( + "DF-Trailing-Period-Purchases", + 
(primaryNameResult.demandFactor.trailingPeriodPurchases or {}) + ) + ioEvent:addField( + "DF-Trailing-Period-Revenues", + (primaryNameResult.demandFactor.trailingPeriodRevenues or {}) + ) + ioEvent:addFieldsWithPrefixIfExist(primaryNameResult.demandFactor, "DF-", { + "currentPeriod", + "currentDemandFactor", + "consecutivePeriodsWithMinDemandFactor", + "revenueThisPeriod", + "purchasesThisPeriod", + }) + end + end + + local function assertValueBytesLowerThan(value, remainingBytes, tablesSeen) + tablesSeen = tablesSeen or {} + + local t = type(value) + if t == "string" then + remainingBytes = remainingBytes - #value + elseif t == "number" or t == "boolean" then + remainingBytes = remainingBytes - 8 -- Approximate size for numbers/booleans + elseif t == "table" and not tablesSeen[value] then + tablesSeen[value] = true + for k, v in pairs(value) do + remainingBytes = assertValueBytesLowerThan(k, remainingBytes, tablesSeen) + remainingBytes = assertValueBytesLowerThan(v, remainingBytes, tablesSeen) + end + end + + if remainingBytes <= 0 then + error("Data size is too large") + end + return remainingBytes + end + + -- Sanitize inputs before every interaction + local function assertAndSanitizeInputs(msg) + if msg.Tags.Action ~= "Eval" and msg.Data then + assertValueBytesLowerThan(msg.Data, 100) + end + + assert( + -- TODO: replace this with LastKnownMessageTimestamp after node release 23.0.0 + msg.Timestamp and tonumber(msg.Timestamp) >= 0, + "Timestamp must be greater than or equal to the last known message timestamp of " + .. LastKnownMessageTimestamp + .. " but was " + .. 
msg.Timestamp + ) + assert(msg.From, "From is required") + assert(msg.Tags and type(msg.Tags) == "table", "Tags are required") + + msg.Tags = utils.validateAndSanitizeInputs(msg.Tags) + msg.From = utils.formatAddress(msg.From) + msg.Timestamp = msg.Timestamp and tonumber(msg.Timestamp) -- Timestamp should always be provided by the CU + end + + local function updateLastKnownMessage(msg) + if msg.Timestamp >= LastKnownMessageTimestamp then + LastKnownMessageTimestamp = msg.Timestamp + LastKnownMessageId = msg.Id + end + end + + --- @class ParsedMessage + --- @field Id string + --- @field Action string + --- @field From string + --- @field Timestamp Timestamp + --- @field Tags table + --- @field ioEvent ARIOEvent + --- @field Cast boolean? + --- @field reply? fun(response: any) + + --- @param handlerName string + --- @param pattern fun(msg: ParsedMessage):'continue'|boolean + --- @param handleFn fun(msg: ParsedMessage) + --- @param critical boolean? + --- @param printEvent boolean? + local function addEventingHandler(handlerName, pattern, handleFn, critical, printEvent) + critical = critical or false + printEvent = printEvent == nil and true or printEvent + Handlers.add(handlerName, pattern, function(msg) + -- Store the old balances to compare after the handler has run for patching state + -- Only do this for the last handler to avoid unnecessary copying + local oldBalances = nil + if pattern(msg) ~= "continue" then + oldBalances = utils.deepCopy(Balances) + end + -- add an ARIOEvent to the message if it doesn't exist + msg.ioEvent = msg.ioEvent or ARIOEvent(msg) + -- global handler for all eventing errors, so we can log them and send a notice to the sender for non critical errors and discard the memory on critical errors + local status, resultOrError = eventingPcall(msg.ioEvent, function(error) + --- non critical errors will send an invalid notice back to the caller with the error information, memory is not discarded + Send(msg, { + Target = msg.From, + Action = 
"Invalid-" .. utils.toTrainCase(handlerName) .. "-Notice", + Error = tostring(error), + Data = tostring(error), + }) + end, handleFn, msg) + if not status and critical then + local errorEvent = ARIOEvent(msg) + -- For critical handlers we want to make sure the event data gets sent to the CU for processing, but that the memory is discarded on failures + -- These handlers (distribute, prune) severely modify global state, and partial updates are dangerous. + -- So we json encode the error and the event data and then throw, so the CU will discard the memory and still process the event data. + -- An alternative approach is to modify the implementation of ao.result - to also return the Output on error. + -- Reference: https://github.com/permaweb/ao/blob/76a618722b201430a372894b3e2753ac01e63d3d/dev-cli/src/starters/lua/ao.lua#L284-L287 + local errorWithEvent = tostring(resultOrError) .. "\n" .. errorEvent:toJSON() + error(errorWithEvent, 0) -- 0 ensures not to include this line number in the error message + end + + -- Send patch message to HB + if oldBalances then + hb.patchBalances(oldBalances) + end + + msg.ioEvent:addField("Handler-Memory-KiB-Used", collectgarbage("count"), false) + collectgarbage("collect") + msg.ioEvent:addField("Final-Memory-KiB-Used", collectgarbage("count"), false) + + if printEvent then + msg.ioEvent:printEvent() + end + end) + end + + addEventingHandler("sanitize", function() + return "continue" + end, function(msg) + assertAndSanitizeInputs(msg) + updateLastKnownMessage(msg) + end, CRITICAL, false) + + -- NOTE: THIS IS A CRITICAL HANDLER AND WILL DISCARD THE MEMORY ON ERROR + addEventingHandler("prune", function() + return "continue" -- continue is a pattern that matches every message and continues to the next handler that matches the tags + end, function(msg) + local epochIndex = epochs.getEpochIndexForTimestamp(msg.Timestamp) + msg.ioEvent:addField("Epoch-Index", epochIndex) + + local previousStateSupplies = { + protocolBalance 
= Balances[ao.id], + lastKnownCirculatingSupply = LastKnownCirculatingSupply, + lastKnownLockedSupply = LastKnownLockedSupply, + lastKnownStakedSupply = LastKnownStakedSupply, + lastKnownDelegatedSupply = LastKnownDelegatedSupply, + lastKnownWithdrawSupply = LastKnownWithdrawSupply, + lastKnownTotalSupply = token.lastKnownTotalTokenSupply(), + } + + if msg.Tags["Force-Prune"] then + print("Force prune provided, resetting all prune timestamps") + gar.scheduleNextGatewaysPruning(0) + gar.scheduleNextRedelegationsPruning(0) + arns.scheduleNextReturnedNamesPrune(0) + arns.scheduleNextRecordsPrune(0) + primaryNames.scheduleNextPrimaryNamesPruning(0) + vaults.scheduleNextVaultsPruning(0) + end + + print("Pruning state at timestamp: " .. msg.Timestamp) + local prunedStateResult = prune.pruneState(msg.Timestamp, msg.Id, LastGracePeriodEntryEndTimestamp) + if prunedStateResult then + local prunedRecordsCount = utils.lengthOfTable(prunedStateResult.prunedRecords or {}) + if prunedRecordsCount > 0 then + local prunedRecordNames = {} + for name, _ in pairs(prunedStateResult.prunedRecords) do + table.insert(prunedRecordNames, name) + end + msg.ioEvent:addField("Pruned-Records", prunedRecordNames) + msg.ioEvent:addField("Pruned-Records-Count", prunedRecordsCount) + msg.ioEvent:addField("Records-Count", utils.lengthOfTable(NameRegistry.records)) + end + local newGracePeriodRecordsCount = utils.lengthOfTable(prunedStateResult.newGracePeriodRecords or {}) + if newGracePeriodRecordsCount > 0 then + local newGracePeriodRecordNames = {} + for name, record in pairs(prunedStateResult.newGracePeriodRecords) do + table.insert(newGracePeriodRecordNames, name) + if record.endTimestamp > LastGracePeriodEntryEndTimestamp then + LastGracePeriodEntryEndTimestamp = record.endTimestamp + end + end + msg.ioEvent:addField("New-Grace-Period-Records", newGracePeriodRecordNames) + msg.ioEvent:addField("New-Grace-Period-Records-Count", newGracePeriodRecordsCount) + 
msg.ioEvent:addField("Last-Grace-Period-Entry-End-Timestamp", LastGracePeriodEntryEndTimestamp) + end + local prunedReturnedNames = prunedStateResult.prunedReturnedNames or {} + local prunedReturnedNamesCount = utils.lengthOfTable(prunedReturnedNames) + if prunedReturnedNamesCount > 0 then + msg.ioEvent:addField("Pruned-Returned-Names", prunedReturnedNames) + msg.ioEvent:addField("Pruned-Returned-Name-Count", prunedReturnedNamesCount) + end + local prunedReserved = prunedStateResult.prunedReserved or {} + local prunedReservedCount = utils.lengthOfTable(prunedReserved) + if prunedReservedCount > 0 then + msg.ioEvent:addField("Pruned-Reserved", prunedReserved) + msg.ioEvent:addField("Pruned-Reserved-Count", prunedReservedCount) + end + local prunedVaultsCount = utils.lengthOfTable(prunedStateResult.prunedVaults or {}) + if prunedVaultsCount > 0 then + msg.ioEvent:addField("Pruned-Vaults", prunedStateResult.prunedVaults) + msg.ioEvent:addField("Pruned-Vaults-Count", prunedVaultsCount) + for _, vault in pairs(prunedStateResult.prunedVaults) do + LastKnownLockedSupply = LastKnownLockedSupply - vault.balance + LastKnownCirculatingSupply = LastKnownCirculatingSupply + vault.balance + end + end + + local pruneGatewaysResult = prunedStateResult.pruneGatewaysResult or {} + addPruneGatewaysResult(msg.ioEvent, pruneGatewaysResult) + + local prunedPrimaryNameRequests = prunedStateResult.prunedPrimaryNameRequests or {} + local prunedRequestsCount = utils.lengthOfTable(prunedPrimaryNameRequests) + if prunedRequestsCount > 0 then + msg.ioEvent:addField("Pruned-Requests-Count", prunedRequestsCount) + end + + addNextPruneTimestampsResults(msg.ioEvent, prunedStateResult) + end + + -- add supply data if it has changed since the last state + if + LastKnownCirculatingSupply ~= previousStateSupplies.lastKnownCirculatingSupply + or LastKnownLockedSupply ~= previousStateSupplies.lastKnownLockedSupply + or LastKnownStakedSupply ~= previousStateSupplies.lastKnownStakedSupply + or 
LastKnownDelegatedSupply ~= previousStateSupplies.lastKnownDelegatedSupply + or LastKnownWithdrawSupply ~= previousStateSupplies.lastKnownWithdrawSupply + or Balances[ao.id] ~= previousStateSupplies.protocolBalance + or token.lastKnownTotalTokenSupply() ~= previousStateSupplies.lastKnownTotalSupply + then + addSupplyData(msg.ioEvent) + end + end, CRITICAL, false) + + -- Write handlers + addEventingHandler(ActionMap.Transfer, utils.hasMatchingTag("Action", ActionMap.Transfer), function(msg) + -- assert recipient is a valid arweave address + local recipient = msg.Tags.Recipient + local quantity = msg.Tags.Quantity + local allowUnsafeAddresses = msg.Tags["Allow-Unsafe-Addresses"] or false + assert(utils.isValidAddress(recipient, allowUnsafeAddresses), "Invalid recipient") + assert( + quantity and quantity > 0 and utils.isInteger(quantity), + "Invalid quantity. Must be integer greater than 0" + ) + assert(recipient ~= msg.From, "Cannot transfer to self") + + msg.ioEvent:addField("RecipientFormatted", recipient) + + local result = balances.transfer(recipient, msg.From, quantity, allowUnsafeAddresses) + if result ~= nil then + local senderNewBalance = result[msg.From] + local recipientNewBalance = result[recipient] + msg.ioEvent:addField("SenderPreviousBalance", senderNewBalance + quantity) + msg.ioEvent:addField("SenderNewBalance", senderNewBalance) + msg.ioEvent:addField("RecipientPreviousBalance", recipientNewBalance - quantity) + msg.ioEvent:addField("RecipientNewBalance", recipientNewBalance) + end + + -- if the sender is the protocol, then we need to update the circulating supply as tokens are now in circulation + if msg.From == ao.id then + LastKnownCirculatingSupply = LastKnownCirculatingSupply + quantity + addSupplyData(msg.ioEvent) + end + + -- Casting implies that the sender does not want a response - Reference: https://elixirforum.com/t/what-is-the-etymology-of-genserver-cast/33610/3 + if not msg.Cast then + -- Debit-Notice message template, that is sent to 
the Sender of the transfer + local debitNotice = { + Target = msg.From, + Action = "Debit-Notice", + Recipient = recipient, + Quantity = tostring(quantity), + ["Allow-Unsafe-Addresses"] = tostring(allowUnsafeAddresses), + Data = "You transferred " .. msg.Tags.Quantity .. " to " .. recipient, + } + -- Credit-Notice message template, that is sent to the Recipient of the transfer + local creditNotice = { + Target = recipient, + Action = "Credit-Notice", + Sender = msg.From, + Quantity = tostring(quantity), + ["Allow-Unsafe-Addresses"] = tostring(allowUnsafeAddresses), + Data = "You received " .. msg.Tags.Quantity .. " from " .. msg.From, + } + + -- Add forwarded tags to the credit and debit notice messages + local didForwardTags = false + for tagName, tagValue in pairs(msg) do + -- Tags beginning with "X-" are forwarded + if string.sub(tagName, 1, 2) == "X-" then + debitNotice[tagName] = tagValue + creditNotice[tagName] = tagValue + didForwardTags = true + msg.ioEvent:addField(tagName, tagValue) + end + end + if didForwardTags then + msg.ioEvent:addField("ForwardedTags", "true") + end + + -- Send Debit-Notice and Credit-Notice + Send(msg, debitNotice) + Send(msg, creditNotice) + end + end) + + addEventingHandler(ActionMap.CreateVault, utils.hasMatchingTag("Action", ActionMap.CreateVault), function(msg) + local quantity = msg.Tags.Quantity + local lockLengthMs = msg.Tags["Lock-Length"] + local msgId = msg.Id + assert( + lockLengthMs and lockLengthMs > 0 and utils.isInteger(lockLengthMs), + "Invalid lock length. Must be integer greater than 0" + ) + assert( + quantity and utils.isInteger(quantity) and quantity >= constants.MIN_VAULT_SIZE, + "Invalid quantity. Must be integer greater than or equal to " .. constants.MIN_VAULT_SIZE .. 
" mARIO" + ) + local vault = vaults.createVault(msg.From, quantity, lockLengthMs, msg.Timestamp, msgId) + + if vault ~= nil then + msg.ioEvent:addField("Vault-Id", msgId) + msg.ioEvent:addField("Vault-Balance", vault.balance) + msg.ioEvent:addField("Vault-Start-Timestamp", vault.startTimestamp) + msg.ioEvent:addField("Vault-End-Timestamp", vault.endTimestamp) + end + + LastKnownLockedSupply = LastKnownLockedSupply + quantity + LastKnownCirculatingSupply = LastKnownCirculatingSupply - quantity + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { + Action = ActionMap.CreateVault .. "-Notice", + ["Vault-Id"] = msgId, + }, + Data = json.encode(vault), + }) + end) + + addEventingHandler( + ActionMap.VaultedTransfer, + utils.hasMatchingTag("Action", ActionMap.VaultedTransfer), + function(msg) + local recipient = msg.Tags.Recipient + local quantity = msg.Tags.Quantity + local lockLengthMs = msg.Tags["Lock-Length"] + local msgId = msg.Id + local allowUnsafeAddresses = msg.Tags["Allow-Unsafe-Addresses"] or false + local revokable = msg.Tags.Revokable or false + assert(utils.isValidAddress(recipient, allowUnsafeAddresses), "Invalid recipient") + assert( + lockLengthMs and lockLengthMs > 0 and utils.isInteger(lockLengthMs), + "Invalid lock length. Must be integer greater than 0" + ) + assert( + quantity and utils.isInteger(quantity) and quantity >= constants.MIN_VAULT_SIZE, + "Invalid quantity. Must be integer greater than or equal to " .. constants.MIN_VAULT_SIZE .. 
" mARIO" + ) + assert(recipient ~= msg.From, "Cannot transfer to self") + + local vault = vaults.vaultedTransfer( + msg.From, + recipient, + quantity, + lockLengthMs, + msg.Timestamp, + msgId, + allowUnsafeAddresses, + revokable + ) + + msg.ioEvent:addField("Vault-Id", msgId) + msg.ioEvent:addField("Vault-Balance", vault.balance) + msg.ioEvent:addField("Vault-Start-Timestamp", vault.startTimestamp) + msg.ioEvent:addField("Vault-End-Timestamp", vault.endTimestamp) + if revokable then + msg.ioEvent:addField("Vault-Controller", msg.From) + end + + LastKnownLockedSupply = LastKnownLockedSupply + quantity + LastKnownCirculatingSupply = LastKnownCirculatingSupply - quantity + addSupplyData(msg.ioEvent) + + -- sender gets an immediate debit notice as the quantity is debited from their balance + Send(msg, { + Target = msg.From, + Recipient = recipient, + Quantity = quantity, + Tags = { + Action = "Debit-Notice", + ["Vault-Id"] = msgId, + ["Allow-Unsafe-Addresses"] = tostring(allowUnsafeAddresses), + }, + Data = json.encode(vault), + }) + -- to the receiver, they get a vault notice + Send(msg, { + Target = recipient, + Quantity = quantity, + Sender = msg.From, + Tags = { + Action = ActionMap.CreateVault .. 
"-Notice", + ["Vault-Id"] = msgId, + ["Allow-Unsafe-Addresses"] = tostring(allowUnsafeAddresses), + }, + Data = json.encode(vault), + }) + end + ) + + addEventingHandler(ActionMap.RevokeVault, utils.hasMatchingTag("Action", ActionMap.RevokeVault), function(msg) + local vaultId = msg.Tags["Vault-Id"] + local recipient = msg.Tags.Recipient + assert(utils.isValidAddress(vaultId, true), "Invalid vault id") + assert(utils.isValidAddress(recipient, true), "Invalid recipient") + + local vault = vaults.revokeVault(msg.From, recipient, vaultId, msg.Timestamp) + + msg.ioEvent:addField("Vault-Id", vaultId) + msg.ioEvent:addField("Vault-Recipient", recipient) + msg.ioEvent:addField("Vault-Controller", vault.controller) + msg.ioEvent:addField("Vault-Balance", vault.balance) + msg.ioEvent:addField("Vault-Start-Timestamp", vault.startTimestamp) + msg.ioEvent:addField("Vault-End-Timestamp", vault.endTimestamp) + + LastKnownLockedSupply = LastKnownLockedSupply - vault.balance + LastKnownCirculatingSupply = LastKnownCirculatingSupply + vault.balance + addSupplyData(msg.ioEvent) + + -- to the controller, they get a credit notice + Send(msg, { + Target = msg.From, + Recipient = recipient, + Quantity = vault.balance, + Tags = { Action = "Credit-Notice", ["Vault-Id"] = vaultId }, + Data = json.encode(vault), + }) + + -- to the receiver, they get a revoke vault notice + Send(msg, { + Target = recipient, + Quantity = vault.balance, + Sender = msg.From, + Tags = { Action = ActionMap.RevokeVault .. "-Notice", ["Vault-Id"] = vaultId }, + Data = json.encode(vault), + }) + end) + + addEventingHandler(ActionMap.ExtendVault, utils.hasMatchingTag("Action", ActionMap.ExtendVault), function(msg) + local vaultId = msg.Tags["Vault-Id"] + local extendLengthMs = msg.Tags["Extend-Length"] + assert(utils.isValidAddress(vaultId, true), "Invalid vault id") + assert( + extendLengthMs and extendLengthMs > 0 and utils.isInteger(extendLengthMs), + "Invalid extension length. 
Must be integer greater than 0" + ) + + local vault = vaults.extendVault(msg.From, extendLengthMs, msg.Timestamp, vaultId) + + if vault ~= nil then + msg.ioEvent:addField("Vault-Id", vaultId) + msg.ioEvent:addField("Vault-Balance", vault.balance) + msg.ioEvent:addField("Vault-Start-Timestamp", vault.startTimestamp) + msg.ioEvent:addField("Vault-End-Timestamp", vault.endTimestamp) + msg.ioEvent:addField("Vault-Prev-End-Timestamp", vault.endTimestamp - extendLengthMs) + end + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.ExtendVault .. "-Notice" }, + Data = json.encode(vault), + }) + end) + + addEventingHandler(ActionMap.IncreaseVault, utils.hasMatchingTag("Action", ActionMap.IncreaseVault), function(msg) + local vaultId = msg.Tags["Vault-Id"] + local quantity = msg.Tags.Quantity + assert(utils.isValidAddress(vaultId, true), "Invalid vault id") + assert( + quantity and quantity > 0 and utils.isInteger(quantity), + "Invalid quantity. Must be integer greater than 0" + ) + + local vault = vaults.increaseVault(msg.From, quantity, vaultId, msg.Timestamp) + + if vault ~= nil then + msg.ioEvent:addField("Vault-Id", vaultId) + msg.ioEvent:addField("VaultBalance", vault.balance) + msg.ioEvent:addField("VaultPrevBalance", vault.balance - quantity) + msg.ioEvent:addField("VaultStartTimestamp", vault.startTimestamp) + msg.ioEvent:addField("VaultEndTimestamp", vault.endTimestamp) + end + + LastKnownLockedSupply = LastKnownLockedSupply + quantity + LastKnownCirculatingSupply = LastKnownCirculatingSupply - quantity + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.IncreaseVault .. 
"-Notice" }, + Data = json.encode(vault), + }) + end) + + addEventingHandler(ActionMap.BuyName, utils.hasMatchingTag("Action", ActionMap.BuyName), function(msg) + local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil + local purchaseType = msg.Tags["Purchase-Type"] and string.lower(msg.Tags["Purchase-Type"]) or "lease" + local years = msg.Tags.Years or nil + local processId = msg.Tags["Process-Id"] + local fundFrom = msg.Tags["Fund-From"] + local allowUnsafeProcessId = msg.Tags["Allow-Unsafe-Addresses"] + assert( + type(purchaseType) == "string" and purchaseType == "lease" or purchaseType == "permabuy", + "Invalid purchase type" + ) + arns.assertValidArNSName(name) + assert(utils.isValidAddress(processId, true), "Process Id must be a valid address.") + if years then + assert( + years >= 1 and years <= 5 and utils.isInteger(years), + "Invalid years. Must be integer between 1 and 5" + ) + end + assertValidFundFrom(fundFrom) + + msg.ioEvent:addField("Name-Length", #name) + + local result = arns.buyRecord( + name, + purchaseType, + years, + msg.From, + msg.Timestamp, + processId, + msg.Id, + fundFrom, + allowUnsafeProcessId + ) + local record = result.record + addRecordResultFields(msg.ioEvent, result) + addSupplyData(msg.ioEvent) + + msg.ioEvent:addField("Records-Count", utils.lengthOfTable(NameRegistry.records)) + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.BuyName .. 
"-Notice", Name = name }, + Data = json.encode({ + name = name, + startTimestamp = record.startTimestamp, + endTimestamp = record.endTimestamp, + undernameLimit = record.undernameLimit, + type = record.type, + purchasePrice = record.purchasePrice, + processId = record.processId, + fundingResult = fundFrom and result.fundingResult or nil, + fundingPlan = fundFrom and result.fundingPlan or nil, + baseRegistrationFee = result.baseRegistrationFee, + remainingBalance = result.remainingBalance, + returnedName = result.returnedName, + }), + }) + + -- If was returned name, send a credit notice to the initiator + if result.returnedName ~= nil then + Send(msg, { + Target = result.returnedName.initiator, + Action = "Credit-Notice", + Quantity = tostring(result.returnedName.rewardForInitiator), + Data = json.encode({ + name = name, + buyer = msg.From, + rewardForInitiator = result.returnedName.rewardForInitiator, + rewardForProtocol = result.returnedName.rewardForProtocol, + record = result.record, + }), + }) + end + end) + + addEventingHandler("upgradeName", utils.hasMatchingTag("Action", ActionMap.UpgradeName), function(msg) + local fundFrom = msg.Tags["Fund-From"] + local name = string.lower(msg.Tags.Name) + assert(type(name) == "string", "Invalid name") + assertValidFundFrom(fundFrom) + + local result = arns.upgradeRecord(msg.From, name, msg.Timestamp, msg.Id, fundFrom) + + local record = {} + if result ~= nil then + record = result.record + addRecordResultFields(msg.ioEvent, result) + addSupplyData(msg.ioEvent) + end + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.UpgradeName .. 
"-Notice", Name = name }, + Data = json.encode(fundFrom and result or { + name = name, + startTimestamp = record.startTimestamp, + endTimestamp = record.endTimestamp, + undernameLimit = record.undernameLimit, + purchasePrice = record.purchasePrice, + processId = record.processId, + type = record.type, + }), + }) + end) + + addEventingHandler(ActionMap.ExtendLease, utils.hasMatchingTag("Action", ActionMap.ExtendLease), function(msg) + local fundFrom = msg.Tags["Fund-From"] + local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil + local years = msg.Tags.Years + assert(type(name) == "string", "Invalid name") + assert( + years and years > 0 and years < 5 and utils.isInteger(years), + "Invalid years. Must be integer between 1 and 5" + ) + assertValidFundFrom(fundFrom) + local result = arns.extendLease(msg.From, name, years, msg.Timestamp, msg.Id, fundFrom) + local recordResult = {} + if result ~= nil then + addRecordResultFields(msg.ioEvent, result) + addSupplyData(msg.ioEvent) + recordResult = result.record + end + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.ExtendLease .. "-Notice", Name = name }, + Data = json.encode(fundFrom and result or recordResult), + }) + end) + + addEventingHandler( + ActionMap.IncreaseUndernameLimit, + utils.hasMatchingTag("Action", ActionMap.IncreaseUndernameLimit), + function(msg) + local fundFrom = msg.Tags["Fund-From"] + local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil + local quantity = msg.Tags.Quantity + assert(type(name) == "string", "Invalid name") + assert( + quantity and quantity > 0 and utils.isInteger(quantity), + "Invalid quantity. 
Must be an integer value greater than 0" + ) + assertValidFundFrom(fundFrom) + + local result = arns.increaseUndernameLimit(msg.From, name, quantity, msg.Timestamp, msg.Id, fundFrom) + local recordResult = {} + if result ~= nil then + recordResult = result.record + addRecordResultFields(msg.ioEvent, result) + msg.ioEvent:addField("Previous-Undername-Limit", recordResult.undernameLimit - msg.Tags.Quantity) + addSupplyData(msg.ioEvent) + end + + Send(msg, { + Target = msg.From, + Tags = { + Action = ActionMap.IncreaseUndernameLimit .. "-Notice", + Name = name, + }, + Data = json.encode(fundFrom and result or recordResult), + }) + end + ) + + function assertTokenCostTags(msg) + local intentType = msg.Tags.Intent + local validIntents = utils.createLookupTable({ + ActionMap.BuyName, + ActionMap.ExtendLease, + ActionMap.IncreaseUndernameLimit, + ActionMap.UpgradeName, + ActionMap.PrimaryNameRequest, + }) + assert( + intentType and type(intentType) == "string" and validIntents[intentType], + "Intent must be valid registry interaction (e.g. Buy-Name, Extend-Lease, Increase-Undername-Limit, Upgrade-Name, Primary-Name-Request). Provided intent: " + .. (intentType or "nil") + ) + if intentType == ActionMap.PrimaryNameRequest then + primaryNames.assertValidPrimaryName(msg.Tags.Name) + else + arns.assertValidArNSName(msg.Tags.Name) + end + + -- if years is provided, assert it is a number and integer between 1 and 5 + if msg.Tags.Years then + assert(utils.isInteger(msg.Tags.Years), "Invalid years. Must be integer") + assert(msg.Tags.Years > 0 and msg.Tags.Years < 6, "Invalid years. Must be between 1 and 5") + end + + -- if quantity provided must be a number and integer greater than 0 + if msg.Tags.Quantity then + assert(utils.isInteger(msg.Tags.Quantity), "Invalid quantity. Must be integer") + assert(msg.Tags.Quantity > 0, "Invalid quantity. 
Must be greater than 0") + end + end + + addEventingHandler(ActionMap.TokenCost, utils.hasMatchingTag("Action", ActionMap.TokenCost), function(msg) + assertTokenCostTags(msg) + local intent = msg.Tags.Intent + local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil + local years = msg.Tags.Years or nil + local quantity = msg.Tags.Quantity or nil + local purchaseType = msg.Tags["Purchase-Type"] or "lease" + + local intendedAction = { + intent = intent, + name = name, + years = years, + quantity = quantity, + purchaseType = purchaseType, + currentTimestamp = msg.Timestamp, + from = msg.From, + } + + local tokenCostResult = arns.getTokenCost(intendedAction) + local tokenCost = tokenCostResult.tokenCost + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.TokenCost .. "-Notice", ["Token-Cost"] = tostring(tokenCost) }, + Data = json.encode(tokenCost), + }) + end) + + addEventingHandler(ActionMap.CostDetails, utils.hasMatchingTag("Action", ActionMap.CostDetails), function(msg) + local fundFrom = msg.Tags["Fund-From"] + local name = string.lower(msg.Tags.Name) + local years = msg.Tags.Years or 1 + local quantity = msg.Tags.Quantity + local purchaseType = msg.Tags["Purchase-Type"] or "lease" + assertTokenCostTags(msg) + assertValidFundFrom(fundFrom) + + local tokenCostAndFundingPlan = arns.getTokenCostAndFundingPlanForIntent( + msg.Tags.Intent, + name, + years, + quantity, + purchaseType, + msg.Timestamp, + msg.From, + fundFrom + ) + if not tokenCostAndFundingPlan then + return + end + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.CostDetails .. "-Notice" }, + Data = json.encode(tokenCostAndFundingPlan), + }) + end) + + addEventingHandler( + ActionMap.RegistrationFees, + utils.hasMatchingTag("Action", ActionMap.RegistrationFees), + function(msg) + local priceList = arns.getRegistrationFees() + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.RegistrationFees .. 
"-Notice" }, + Data = json.encode(priceList), + }) + end + ) + + addEventingHandler(ActionMap.JoinNetwork, utils.hasMatchingTag("Action", ActionMap.JoinNetwork), function(msg) + local updatedSettings = { + label = msg.Tags.Label, + note = msg.Tags.Note, + fqdn = msg.Tags.FQDN, + port = msg.Tags.Port or 443, + protocol = msg.Tags.Protocol or "https", + allowDelegatedStaking = msg.Tags["Allow-Delegated-Staking"] == "true" + or msg.Tags["Allow-Delegated-Staking"] == "allowlist", + allowedDelegates = msg.Tags["Allow-Delegated-Staking"] == "allowlist" + and utils.splitAndTrimString(msg.Tags["Allowed-Delegates"] or "", ",") + or nil, + minDelegatedStake = msg.Tags["Min-Delegated-Stake"], + delegateRewardShareRatio = msg.Tags["Delegate-Reward-Share-Ratio"] or 0, + properties = msg.Tags.Properties or "FH1aVetOoulPGqgYukj0VE0wIhDy90WiQoV3U2PeY44", + autoStake = msg.Tags["Auto-Stake"] == "true", + } + + local updatedServices = utils.safeDecodeJson(msg.Tags.Services) + local fromAddress = msg.From + local observerAddress = msg.Tags["Observer-Address"] or fromAddress + local stake = msg.Tags["Operator-Stake"] + + assert(not msg.Tags.Services or updatedServices, "Services must be a valid JSON string") + + msg.ioEvent:addField("Resolved-Observer-Address", observerAddress) + msg.ioEvent:addField("Sender-Previous-Balance", Balances[fromAddress] or 0) + + local gateway = + gar.joinNetwork(fromAddress, stake, updatedSettings, updatedServices, observerAddress, msg.Timestamp) + msg.ioEvent:addField("Sender-New-Balance", Balances[fromAddress] or 0) + if gateway ~= nil then + msg.ioEvent:addField("GW-Start-Timestamp", gateway.startTimestamp) + end + local gwStats = gatewayStats() + msg.ioEvent:addField("Joined-Gateways-Count", gwStats.joined) + msg.ioEvent:addField("Leaving-Gateways-Count", gwStats.leaving) + + LastKnownCirculatingSupply = LastKnownCirculatingSupply - stake + LastKnownStakedSupply = LastKnownStakedSupply + stake + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = 
msg.From, + Tags = { Action = ActionMap.JoinNetwork .. "-Notice" }, + Data = json.encode(gateway), + }) + end) + + addEventingHandler(ActionMap.LeaveNetwork, utils.hasMatchingTag("Action", ActionMap.LeaveNetwork), function(msg) + local unsafeGatewayBeforeLeaving = gar.getGatewayUnsafe(msg.From) + local gwPrevTotalDelegatedStake = 0 + local gwPrevStake = 0 + if unsafeGatewayBeforeLeaving ~= nil then + gwPrevTotalDelegatedStake = unsafeGatewayBeforeLeaving.totalDelegatedStake + gwPrevStake = unsafeGatewayBeforeLeaving.operatorStake + end + + assert(unsafeGatewayBeforeLeaving, "Gateway not found") + + local gateway = gar.leaveNetwork(msg.From, msg.Timestamp, msg.Id) + + if gateway ~= nil then + msg.ioEvent:addField("GW-Vaults-Count", utils.lengthOfTable(gateway.vaults or {})) + local exitVault = gateway.vaults[msg.From] + local withdrawVault = gateway.vaults[msg.Id] + local previousStake = exitVault.balance + if exitVault ~= nil then + msg.ioEvent:addFieldsWithPrefixIfExist( + exitVault, + "Exit-Vault-", + { "balance", "startTimestamp", "endTimestamp" } + ) + end + if withdrawVault ~= nil then + previousStake = previousStake + withdrawVault.balance + msg.ioEvent:addFieldsWithPrefixIfExist( + withdrawVault, + "Withdraw-Vault-", + { "balance", "startTimestamp", "endTimestamp" } + ) + end + msg.ioEvent:addField("Previous-Operator-Stake", previousStake) + msg.ioEvent:addFieldsWithPrefixIfExist( + gateway, + "GW-", + { "totalDelegatedStake", "observerAddress", "startTimestamp", "endTimestamp" } + ) + msg.ioEvent:addFields(gateway.stats or {}) + end + + local gwStats = gatewayStats() + msg.ioEvent:addField("Joined-Gateways-Count", gwStats.joined) + msg.ioEvent:addField("Leaving-Gateways-Count", gwStats.leaving) + + LastKnownStakedSupply = LastKnownStakedSupply - gwPrevStake - gwPrevTotalDelegatedStake + LastKnownWithdrawSupply = LastKnownWithdrawSupply + gwPrevStake + gwPrevTotalDelegatedStake + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { 
Action = ActionMap.LeaveNetwork .. "-Notice" }, + Data = json.encode(gateway), + }) + end) + + addEventingHandler( + ActionMap.IncreaseOperatorStake, + utils.hasMatchingTag("Action", ActionMap.IncreaseOperatorStake), + function(msg) + local quantity = msg.Tags.Quantity + assert( + quantity and utils.isInteger(quantity) and quantity > 0, + "Invalid quantity. Must be integer greater than 0" + ) + + msg.ioEvent:addField("Sender-Previous-Balance", Balances[msg.From]) + local gateway = gar.increaseOperatorStake(msg.From, quantity) + + msg.ioEvent:addField("Sender-New-Balance", Balances[msg.From]) + if gateway ~= nil then + msg.ioEvent:addField("New-Operator-Stake", gateway.operatorStake) + msg.ioEvent:addField("Previous-Operator-Stake", gateway.operatorStake - quantity) + end + + LastKnownCirculatingSupply = LastKnownCirculatingSupply - quantity + LastKnownStakedSupply = LastKnownStakedSupply + quantity + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.IncreaseOperatorStake .. "-Notice" }, + Data = json.encode(gateway), + }) + end + ) + + addEventingHandler( + ActionMap.DecreaseOperatorStake, + utils.hasMatchingTag("Action", ActionMap.DecreaseOperatorStake), + function(msg) + local quantity = msg.Tags.Quantity + local instantWithdraw = msg.Tags.Instant and msg.Tags.Instant == "true" or false + assert( + quantity and utils.isInteger(quantity) and quantity > constants.MIN_WITHDRAWAL_AMOUNT, + "Invalid quantity. Must be integer greater than " .. 
constants.MIN_WITHDRAWAL_AMOUNT + ) + assert( + msg.Tags.Instant == nil or (msg.Tags.Instant == "true" or msg.Tags.Instant == "false"), + "Instant must be a string with value 'true' or 'false'" + ) + + msg.ioEvent:addField("Sender-Previous-Balance", Balances[msg.From]) + + local result = gar.decreaseOperatorStake(msg.From, quantity, msg.Timestamp, msg.Id, instantWithdraw) + local decreaseOperatorStakeResult = { + gateway = result and result.gateway or {}, + penaltyRate = result and result.penaltyRate or 0, + expeditedWithdrawalFee = result and result.expeditedWithdrawalFee or 0, + amountWithdrawn = result and result.amountWithdrawn or 0, + } + + msg.ioEvent:addField("Sender-New-Balance", Balances[msg.From]) -- should be unchanged + if result ~= nil and result.gateway ~= nil then + local gateway = result.gateway + local previousStake = gateway.operatorStake + quantity + msg.ioEvent:addField("New-Operator-Stake", gateway.operatorStake) + msg.ioEvent:addField("GW-Vaults-Count", utils.lengthOfTable(gateway.vaults or {})) + if instantWithdraw then + msg.ioEvent:addField("Instant-Withdrawal", instantWithdraw) + msg.ioEvent:addField("Instant-Withdrawal-Fee", result.expeditedWithdrawalFee) + msg.ioEvent:addField("Amount-Withdrawn", result.amountWithdrawn) + msg.ioEvent:addField("Penalty-Rate", result.penaltyRate) + end + local decreaseStakeVault = gateway.vaults[msg.Id] + if decreaseStakeVault ~= nil then + previousStake = previousStake + decreaseStakeVault.balance + msg.ioEvent:addFieldsWithPrefixIfExist( + decreaseStakeVault, + "Decrease-Stake-Vault-", + { "balance", "startTimestamp", "endTimestamp" } + ) + end + msg.ioEvent:addField("Previous-Operator-Stake", previousStake) + end + + LastKnownStakedSupply = LastKnownStakedSupply - quantity + if instantWithdraw then + LastKnownCirculatingSupply = LastKnownCirculatingSupply + decreaseOperatorStakeResult.amountWithdrawn + else + LastKnownWithdrawSupply = LastKnownWithdrawSupply + quantity + end + + 
addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { + Action = ActionMap.DecreaseOperatorStake .. "-Notice", + ["Penalty-Rate"] = tostring(decreaseOperatorStakeResult.penaltyRate), + ["Expedited-Withdrawal-Fee"] = tostring(decreaseOperatorStakeResult.expeditedWithdrawalFee), + ["Amount-Withdrawn"] = tostring(decreaseOperatorStakeResult.amountWithdrawn), + }, + Data = json.encode(decreaseOperatorStakeResult.gateway), + }) + end + ) + + addEventingHandler(ActionMap.DelegateStake, utils.hasMatchingTag("Action", ActionMap.DelegateStake), function(msg) + local gatewayTarget = msg.Tags.Target or msg.Tags.Address + local quantity = msg.Tags.Quantity + assert(utils.isValidAddress(gatewayTarget, true), "Invalid gateway address") + assert( + msg.Tags.Quantity and msg.Tags.Quantity > 0 and utils.isInteger(msg.Tags.Quantity), + "Invalid quantity. Must be integer greater than 0" + ) + + msg.ioEvent:addField("Target-Formatted", gatewayTarget) + + local gateway = gar.delegateStake(msg.From, gatewayTarget, quantity, msg.Timestamp) + local delegateResult = {} + if gateway ~= nil then + local newStake = gateway.delegates[msg.From].delegatedStake + msg.ioEvent:addField("Previous-Stake", newStake - quantity) + msg.ioEvent:addField("New-Stake", newStake) + msg.ioEvent:addField("Gateway-Total-Delegated-Stake", gateway.totalDelegatedStake) + delegateResult = gateway.delegates[msg.From] + end + + LastKnownCirculatingSupply = LastKnownCirculatingSupply - quantity + LastKnownDelegatedSupply = LastKnownDelegatedSupply + quantity + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.DelegateStake .. 
"-Notice", Gateway = gatewayTarget }, + Data = json.encode(delegateResult), + }) + end) + + addEventingHandler( + ActionMap.CancelWithdrawal, + utils.hasMatchingTag("Action", ActionMap.CancelWithdrawal), + function(msg) + local gatewayAddress = msg.Tags.Target or msg.Tags.Address or msg.From + local vaultId = msg.Tags["Vault-Id"] + assert(utils.isValidAddress(gatewayAddress, true), "Invalid gateway address") + assert(utils.isValidAddress(vaultId, true), "Invalid vault id") + + msg.ioEvent:addField("Target-Formatted", gatewayAddress) + + local result = gar.cancelGatewayWithdrawal(msg.From, gatewayAddress, vaultId) + local updatedGateway = {} + if result ~= nil then + updatedGateway = result.gateway + local vaultBalance = result.vaultBalance + local previousOperatorStake = result.previousOperatorStake + local newOperatorStake = result.totalOperatorStake + local previousTotalDelegatedStake = result.previousTotalDelegatedStake + local newTotalDelegatedStake = result.totalDelegatedStake + local operatorStakeChange = newOperatorStake - previousOperatorStake + local delegatedStakeChange = newTotalDelegatedStake - previousTotalDelegatedStake + msg.ioEvent:addField("Previous-Operator-Stake", previousOperatorStake) + msg.ioEvent:addField("New-Operator-Stake", newOperatorStake) + msg.ioEvent:addField("Previous-Total-Delegated-Stake", previousTotalDelegatedStake) + msg.ioEvent:addField("New-Total-Delegated-Stake", newTotalDelegatedStake) + msg.ioEvent:addField("Stake-Amount-Withdrawn", vaultBalance) + LastKnownStakedSupply = LastKnownStakedSupply + operatorStakeChange + LastKnownDelegatedSupply = LastKnownDelegatedSupply + delegatedStakeChange + LastKnownWithdrawSupply = LastKnownWithdrawSupply - vaultBalance + addSupplyData(msg.ioEvent) + end + + Send(msg, { + Target = msg.From, + Tags = { + Action = ActionMap.CancelWithdrawal .. 
"-Notice", + Address = gatewayAddress, + ["Vault-Id"] = msg.Tags["Vault-Id"], + }, + Data = json.encode(updatedGateway), + }) + end + ) + + addEventingHandler( + ActionMap.InstantWithdrawal, + utils.hasMatchingTag("Action", ActionMap.InstantWithdrawal), + function(msg) + local target = msg.Tags.Target or msg.Tags.Address or msg.From -- if not provided, use sender + local vaultId = msg.Tags["Vault-Id"] + msg.ioEvent:addField("Target-Formatted", target) + assert(utils.isValidAddress(target, true), "Invalid gateway address") + assert(utils.isValidAddress(vaultId, true), "Invalid vault id") + + local result = gar.instantGatewayWithdrawal(msg.From, target, vaultId, msg.Timestamp) + if result ~= nil then + local vaultBalance = result.vaultBalance + msg.ioEvent:addField("Stake-Amount-Withdrawn", vaultBalance) + msg.ioEvent:addField("Vault-Elapsed-Time", result.elapsedTime) + msg.ioEvent:addField("Vault-Remaining-Time", result.remainingTime) + msg.ioEvent:addField("Penalty-Rate", result.penaltyRate) + msg.ioEvent:addField("Instant-Withdrawal-Fee", result.expeditedWithdrawalFee) + msg.ioEvent:addField("Amount-Withdrawn", result.amountWithdrawn) + msg.ioEvent:addField("Previous-Vault-Balance", result.amountWithdrawn + result.expeditedWithdrawalFee) + LastKnownCirculatingSupply = LastKnownCirculatingSupply + result.amountWithdrawn + LastKnownWithdrawSupply = LastKnownWithdrawSupply + - result.amountWithdrawn + - result.expeditedWithdrawalFee + addSupplyData(msg.ioEvent) + Send(msg, { + Target = msg.From, + Tags = { + Action = ActionMap.InstantWithdrawal .. 
"-Notice", + Address = target, + ["Vault-Id"] = vaultId, + ["Amount-Withdrawn"] = tostring(result.amountWithdrawn), + ["Penalty-Rate"] = tostring(result.penaltyRate), + ["Expedited-Withdrawal-Fee"] = tostring(result.expeditedWithdrawalFee), + }, + Data = json.encode(result), + }) + end + end + ) + + addEventingHandler( + ActionMap.DecreaseDelegateStake, + utils.hasMatchingTag("Action", ActionMap.DecreaseDelegateStake), + function(msg) + local target = msg.Tags.Target or msg.Tags.Address + local quantity = msg.Tags.Quantity + local instantWithdraw = msg.Tags.Instant and msg.Tags.Instant == "true" or false + msg.ioEvent:addField("Target-Formatted", target) + msg.ioEvent:addField("Quantity", quantity) + assert( + quantity and utils.isInteger(quantity) and quantity > constants.MIN_WITHDRAWAL_AMOUNT, + "Invalid quantity. Must be integer greater than " .. constants.MIN_WITHDRAWAL_AMOUNT + ) + + local result = gar.decreaseDelegateStake(target, msg.From, quantity, msg.Timestamp, msg.Id, instantWithdraw) + local decreaseDelegateStakeResult = { + penaltyRate = result and result.penaltyRate or 0, + expeditedWithdrawalFee = result and result.expeditedWithdrawalFee or 0, + amountWithdrawn = result and result.amountWithdrawn or 0, + } + + msg.ioEvent:addField("Sender-New-Balance", Balances[msg.From]) -- should be unchanged + + if result ~= nil then + local newStake = result.updatedDelegate.delegatedStake + msg.ioEvent:addField("Previous-Stake", newStake + quantity) + msg.ioEvent:addField("New-Stake", newStake) + msg.ioEvent:addField("Gateway-Total-Delegated-Stake", result.gatewayTotalDelegatedStake) + + if instantWithdraw then + msg.ioEvent:addField("Instant-Withdrawal", instantWithdraw) + msg.ioEvent:addField("Instant-Withdrawal-Fee", result.expeditedWithdrawalFee) + msg.ioEvent:addField("Amount-Withdrawn", result.amountWithdrawn) + msg.ioEvent:addField("Penalty-Rate", result.penaltyRate) + end + + local newDelegateVaults = result.updatedDelegate.vaults + if newDelegateVaults 
~= nil then + msg.ioEvent:addField("Vaults-Count", utils.lengthOfTable(newDelegateVaults)) + local newDelegateVault = newDelegateVaults[msg.Id] + if newDelegateVault ~= nil then + msg.ioEvent:addField("Vault-Id", msg.Id) + msg.ioEvent:addField("Vault-Balance", newDelegateVault.balance) + msg.ioEvent:addField("Vault-Start-Timestamp", newDelegateVault.startTimestamp) + msg.ioEvent:addField("Vault-End-Timestamp", newDelegateVault.endTimestamp) + end + end + end + + LastKnownDelegatedSupply = LastKnownDelegatedSupply - quantity + if not instantWithdraw then + LastKnownWithdrawSupply = LastKnownWithdrawSupply + quantity + end + LastKnownCirculatingSupply = LastKnownCirculatingSupply + decreaseDelegateStakeResult.amountWithdrawn + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { + Action = ActionMap.DecreaseDelegateStake .. "-Notice", + Address = target, + Quantity = quantity, + ["Penalty-Rate"] = tostring(decreaseDelegateStakeResult.penaltyRate), + ["Expedited-Withdrawal-Fee"] = tostring(decreaseDelegateStakeResult.expeditedWithdrawalFee), + ["Amount-Withdrawn"] = tostring(decreaseDelegateStakeResult.amountWithdrawn), + }, + Data = json.encode(result and result.updatedDelegate or {}), + }) + end + ) + + addEventingHandler( + ActionMap.UpdateGatewaySettings, + utils.hasMatchingTag("Action", ActionMap.UpdateGatewaySettings), + function(msg) + local unsafeGateway = gar.getGatewayUnsafe(msg.From) + local updatedServices = utils.safeDecodeJson(msg.Tags.Services) + + assert(unsafeGateway, "Gateway not found") + assert(not msg.Tags.Services or updatedServices, "Services must be provided if Services-Json is provided") + -- keep defaults, but update any new ones + + -- If delegated staking is being fully enabled or disabled, clear the allowlist + local allowDelegatedStakingOverride = msg.Tags["Allow-Delegated-Staking"] + local enableOpenDelegatedStaking = allowDelegatedStakingOverride == "true" + local enableLimitedDelegatedStaking = 
allowDelegatedStakingOverride == "allowlist" + local disableDelegatedStaking = allowDelegatedStakingOverride == "false" + local shouldClearAllowlist = enableOpenDelegatedStaking or disableDelegatedStaking + local needNewAllowlist = not shouldClearAllowlist + and ( + enableLimitedDelegatedStaking + or (unsafeGateway.settings.allowedDelegatesLookup and msg.Tags["Allowed-Delegates"] ~= nil) + ) + + local updatedSettings = { + label = msg.Tags.Label or unsafeGateway.settings.label, + note = msg.Tags.Note or unsafeGateway.settings.note, + fqdn = msg.Tags.FQDN or unsafeGateway.settings.fqdn, + port = msg.Tags.Port or unsafeGateway.settings.port, + protocol = msg.Tags.Protocol or unsafeGateway.settings.protocol, + allowDelegatedStaking = enableOpenDelegatedStaking -- clear directive to enable + or enableLimitedDelegatedStaking -- clear directive to enable + or not disableDelegatedStaking -- NOT clear directive to DISABLE + and unsafeGateway.settings.allowDelegatedStaking, -- otherwise unspecified, so use previous setting + + allowedDelegates = needNewAllowlist and utils.splitAndTrimString(msg.Tags["Allowed-Delegates"], ",") -- replace the lookup list + or nil, -- change nothing + + minDelegatedStake = msg.Tags["Min-Delegated-Stake"] or unsafeGateway.settings.minDelegatedStake, + delegateRewardShareRatio = msg.Tags["Delegate-Reward-Share-Ratio"] + or unsafeGateway.settings.delegateRewardShareRatio, + properties = msg.Tags.Properties or unsafeGateway.settings.properties, + autoStake = not msg.Tags["Auto-Stake"] and unsafeGateway.settings.autoStake + or msg.Tags["Auto-Stake"] == "true", + } + + local observerAddress = msg.Tags["Observer-Address"] or unsafeGateway.observerAddress + local result = gar.updateGatewaySettings( + msg.From, + updatedSettings, + updatedServices, + observerAddress, + msg.Timestamp, + msg.Id + ) + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.UpdateGatewaySettings .. 
"-Notice" }, + Data = json.encode(result), + }) + end + ) + + addEventingHandler(ActionMap.ReassignName, utils.hasMatchingTag("Action", ActionMap.ReassignName), function(msg) + local newProcessId = msg.Tags["Process-Id"] + local name = string.lower(msg.Tags.Name) + local initiator = msg.Tags.Initiator + local allowUnsafeProcessId = msg.Tags["Allow-Unsafe-Addresses"] + assert(name and #name > 0, "Name is required") + assert(utils.isValidAddress(newProcessId, true), "Process Id must be a valid address.") + if initiator ~= nil then + assert(utils.isValidAddress(initiator, true), "Invalid initiator address.") + end + + local reassignment = arns.reassignName(name, msg.From, msg.Timestamp, newProcessId, allowUnsafeProcessId) + + Send(msg, { + Target = msg.From, + Action = ActionMap.ReassignName .. "-Notice", + Name = name, + Data = json.encode(reassignment), + }) + + if initiator ~= nil then + Send(msg, { + Target = initiator, + Action = ActionMap.ReassignName .. "-Notice", + Name = name, + Data = json.encode(reassignment), + }) + end + return + end) + + addEventingHandler( + ActionMap.SaveObservations, + utils.hasMatchingTag("Action", ActionMap.SaveObservations), + function(msg) + local reportTxId = msg.Tags["Report-Tx-Id"] + local failedGateways = utils.splitAndTrimString(msg.Tags["Failed-Gateways"], ",") + -- observers provide AR-IO-Epoch-Index, so check both + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or msg.Tags["AR-IO-Epoch-Index"] and tonumber(msg.Tags["AR-IO-Epoch-Index"]) + assert( + epochIndex and epochIndex >= 0 and utils.isInteger(epochIndex), + "Epoch index is required. Must be a number greater than 0." + ) + assert(utils.isValidArweaveAddress(reportTxId), "Invalid report tx id. Must be a valid Arweave address.") + for _, gateway in ipairs(failedGateways) do + assert(utils.isValidAddress(gateway, true), "Invalid failed gateway address: " .. 
gateway) + end + + local observations = + epochs.saveObservations(msg.From, reportTxId, failedGateways, epochIndex, msg.Timestamp) + if observations ~= nil then + local failureSummariesCount = utils.lengthOfTable(observations.failureSummaries or {}) + if failureSummariesCount > 0 then + msg.ioEvent:addField("Failure-Summaries-Count", failureSummariesCount) + end + local reportsCount = utils.lengthOfTable(observations.reports or {}) + if reportsCount > 0 then + msg.ioEvent:addField("Reports-Count", reportsCount) + end + end + + Send(msg, { + Target = msg.From, + Action = ActionMap.SaveObservations .. "-Notice", + Data = json.encode(observations), + }) + end + ) + + addEventingHandler(ActionMap.EpochSettings, utils.hasMatchingTag("Action", ActionMap.EpochSettings), function(msg) + local epochSettings = epochs.getSettings() + + Send(msg, { + Target = msg.From, + Action = ActionMap.EpochSettings .. "-Notice", + Data = json.encode(epochSettings), + }) + end) + + addEventingHandler( + ActionMap.DemandFactorSettings, + utils.hasMatchingTag("Action", ActionMap.DemandFactorSettings), + function(msg) + local demandFactorSettings = demand.getSettings() + Send(msg, { + Target = msg.From, + Action = ActionMap.DemandFactorSettings .. "-Notice", + Data = json.encode(demandFactorSettings), + }) + end + ) + + addEventingHandler( + ActionMap.GatewayRegistrySettings, + utils.hasMatchingTag("Action", ActionMap.GatewayRegistrySettings), + function(msg) + local gatewayRegistrySettings = gar.getSettings() + Send(msg, { + Target = msg.From, + Action = ActionMap.GatewayRegistrySettings .. 
"-Notice", + Data = json.encode(gatewayRegistrySettings), + }) + end + ) + + -- Reference: https://github.com/permaweb/aos/blob/eea71b68a4f89ac14bf6797804f97d0d39612258/blueprints/token.lua#L264-L280 + addEventingHandler("totalSupply", utils.hasMatchingTag("Action", ActionMap.TotalSupply), function(msg) + assert(msg.From ~= ao.id, "Cannot call Total-Supply from the same process!") + local totalSupplyDetails = token.computeTotalSupply() + addSupplyData(msg.ioEvent, { + totalTokenSupply = totalSupplyDetails.totalSupply, + }) + addTalliesData(msg.ioEvent, totalSupplyDetails.stateObjectTallies) + msg.ioEvent:addField("Last-Known-Total-Token-Supply", token.lastKnownTotalTokenSupply()) + Send(msg, { + Action = "Total-Supply", + Data = tostring(totalSupplyDetails.totalSupply), + Ticker = Ticker, + }) + end) + + addEventingHandler("totalTokenSupply", utils.hasMatchingTag("Action", ActionMap.TotalTokenSupply), function(msg) + local totalSupplyDetails = token.computeTotalSupply() + addSupplyData(msg.ioEvent, { + totalTokenSupply = totalSupplyDetails.totalSupply, + }) + addTalliesData(msg.ioEvent, totalSupplyDetails.stateObjectTallies) + msg.ioEvent:addField("Last-Known-Total-Token-Supply", token.lastKnownTotalTokenSupply()) + + Send(msg, { + Target = msg.From, + Action = ActionMap.TotalTokenSupply .. 
"-Notice", + ["Total-Supply"] = tostring(totalSupplyDetails.totalSupply), + ["Circulating-Supply"] = tostring(totalSupplyDetails.circulatingSupply), + ["Locked-Supply"] = tostring(totalSupplyDetails.lockedSupply), + ["Staked-Supply"] = tostring(totalSupplyDetails.stakedSupply), + ["Delegated-Supply"] = tostring(totalSupplyDetails.delegatedSupply), + ["Withdraw-Supply"] = tostring(totalSupplyDetails.withdrawSupply), + ["Protocol-Balance"] = tostring(totalSupplyDetails.protocolBalance), + Data = json.encode({ + -- NOTE: json.lua supports up to stringified numbers with 20 significant digits - numbers should always be stringified + total = totalSupplyDetails.totalSupply, + circulating = totalSupplyDetails.circulatingSupply, + locked = totalSupplyDetails.lockedSupply, + staked = totalSupplyDetails.stakedSupply, + delegated = totalSupplyDetails.delegatedSupply, + withdrawn = totalSupplyDetails.withdrawSupply, + protocolBalance = totalSupplyDetails.protocolBalance, + }), + }) + end) + + -- distribute rewards + -- NOTE: THIS IS A CRITICAL HANDLER AND WILL DISCARD THE MEMORY ON ERROR + addEventingHandler("distribute", function(msg) + return msg.Action == "Tick" or msg.Action == "Distribute" + end, function(msg) + local msgId = msg.Id + local blockHeight = tonumber(msg["Block-Height"]) + local hashchain = msg["Hash-Chain"] + local lastCreatedEpochIndex = LastCreatedEpochIndex + local lastDistributedEpochIndex = LastDistributedEpochIndex + local targetCurrentEpochIndex = epochs.getEpochIndexForTimestamp(msg.Timestamp) + + assert(blockHeight, "Block height is required") + assert(hashchain, "Hash chain is required") + + msg.ioEvent:addField("Last-Created-Epoch-Index", lastCreatedEpochIndex) + msg.ioEvent:addField("Last-Distributed-Epoch-Index", lastDistributedEpochIndex) + msg.ioEvent:addField("Target-Current-Epoch-Index", targetCurrentEpochIndex) + + -- tick and distribute rewards for every index between the last ticked epoch and the current epoch + local 
distributedEpochIndexes = {} + local newEpochIndexes = {} + local newPruneGatewaysResults = {} + local tickedRewardDistributions = {} + local totalTickedRewardsDistributed = 0 + + -- tick the demand factor all the way to the current period + local latestDemandFactor, newDemandFactors = demand.updateDemandFactor(msg.Timestamp) + if latestDemandFactor ~= nil then + Send(msg, { + Target = msg.From, + Action = "Demand-Factor-Updated-Notice", + Data = tostring(latestDemandFactor), + }) + end + + --[[ + Tick up to the target epoch index, this will create new epochs and distribute rewards for existing epochs + This should never fall behind, but in the case it does, it will create the epochs and distribute rewards for the epochs + accordingly. It should finish at the target epoch index - which is computed based on the message timestamp + ]] + -- + print("Ticking from " .. lastCreatedEpochIndex .. " to " .. targetCurrentEpochIndex) + for epochIndexToTick = lastCreatedEpochIndex, targetCurrentEpochIndex do + local tickResult = tick.tickEpoch(msg.Timestamp, blockHeight, hashchain, msgId, epochIndexToTick) + if tickResult.pruneGatewaysResult ~= nil then + table.insert(newPruneGatewaysResults, tickResult.pruneGatewaysResult) + end + if tickResult.maybeNewEpoch ~= nil then + print("Created epoch " .. tickResult.maybeNewEpoch.epochIndex) + LastCreatedEpochIndex = tickResult.maybeNewEpoch.epochIndex + table.insert(newEpochIndexes, tickResult.maybeNewEpoch.epochIndex) + Send(msg, { + Target = msg.From, + Action = "Epoch-Created-Notice", + ["Epoch-Index"] = tostring(tickResult.maybeNewEpoch.epochIndex), + Data = json.encode(tickResult.maybeNewEpoch), + }) + end + if tickResult.maybeDistributedEpoch ~= nil then + print("Distributed rewards for epoch " .. 
tickResult.maybeDistributedEpoch.epochIndex) + LastDistributedEpochIndex = tickResult.maybeDistributedEpoch.epochIndex + tickedRewardDistributions[tostring(tickResult.maybeDistributedEpoch.epochIndex)] = + tickResult.maybeDistributedEpoch.distributions.totalDistributedRewards + totalTickedRewardsDistributed = totalTickedRewardsDistributed + + tickResult.maybeDistributedEpoch.distributions.totalDistributedRewards + table.insert(distributedEpochIndexes, tickResult.maybeDistributedEpoch.epochIndex) + Send(msg, { + Target = msg.From, + Action = "Epoch-Distribution-Notice", + ["Epoch-Index"] = tostring(tickResult.maybeDistributedEpoch.epochIndex), + Data = json.encode(tickResult.maybeDistributedEpoch), + }) + end + end + if #distributedEpochIndexes > 0 then + msg.ioEvent:addField("Distributed-Epoch-Indexes", distributedEpochIndexes) + end + if #newEpochIndexes > 0 then + msg.ioEvent:addField("New-Epoch-Indexes", newEpochIndexes) + -- Only print the prescribed observers of the newest epoch + local newestEpoch = epochs.getEpoch(math.max(table.unpack(newEpochIndexes))) + local prescribedObserverAddresses = {} + local prescribedObserverGatewayAddresses = {} + if newestEpoch ~= nil and newestEpoch.prescribedObservers ~= nil then + for observerAddress, gatewayAddress in pairs(newestEpoch.prescribedObservers) do + table.insert(prescribedObserverAddresses, observerAddress) + table.insert(prescribedObserverGatewayAddresses, gatewayAddress) + end + end + msg.ioEvent:addField("Prescribed-Observer-Addresses", prescribedObserverAddresses) + msg.ioEvent:addField("Prescribed-Observer-Gateway-Addresses", prescribedObserverGatewayAddresses) + end + local updatedDemandFactorCount = utils.lengthOfTable(newDemandFactors) + if updatedDemandFactorCount > 0 then + local updatedDemandFactorPeriods = {} + local updatedDemandFactorValues = {} + for _, df in ipairs(newDemandFactors) do + table.insert(updatedDemandFactorPeriods, df.period) + table.insert(updatedDemandFactorValues, df.demandFactor) 
+ end + msg.ioEvent:addField("New-Demand-Factor-Periods", updatedDemandFactorPeriods) + msg.ioEvent:addField("New-Demand-Factor-Values", updatedDemandFactorValues) + msg.ioEvent:addField("New-Demand-Factor-Count", updatedDemandFactorCount) + end + if #newPruneGatewaysResults > 0 then + -- Reduce the prune gateways results and then track changes + --- @type PruneGatewaysResult + local aggregatedPruneGatewaysResult = utils.reduce( + newPruneGatewaysResults, + --- @param acc PruneGatewaysResult + --- @param _ any + --- @param pruneGatewaysResult PruneGatewaysResult + function(acc, _, pruneGatewaysResult) + for _, address in pairs(pruneGatewaysResult.prunedGateways) do + table.insert(acc.prunedGateways, address) + end + for address, slashAmount in pairs(pruneGatewaysResult.slashedGateways) do + acc.slashedGateways[address] = (acc.slashedGateways[address] or 0) + slashAmount + end + acc.gatewayStakeReturned = acc.gatewayStakeReturned + pruneGatewaysResult.gatewayStakeReturned + acc.delegateStakeReturned = acc.delegateStakeReturned + pruneGatewaysResult.delegateStakeReturned + acc.gatewayStakeWithdrawing = acc.gatewayStakeWithdrawing + + pruneGatewaysResult.gatewayStakeWithdrawing + acc.delegateStakeWithdrawing = acc.delegateStakeWithdrawing + + pruneGatewaysResult.delegateStakeWithdrawing + acc.stakeSlashed = acc.stakeSlashed + pruneGatewaysResult.stakeSlashed + -- Upsert to the latest tallies if available + acc.gatewayObjectTallies = pruneGatewaysResult.gatewayObjectTallies or acc.gatewayObjectTallies + return acc + end, + { + prunedGateways = {}, + slashedGateways = {}, + gatewayStakeReturned = 0, + delegateStakeReturned = 0, + gatewayStakeWithdrawing = 0, + delegateStakeWithdrawing = 0, + stakeSlashed = 0, + } + ) + addPruneGatewaysResult(msg.ioEvent, aggregatedPruneGatewaysResult) + end + if utils.lengthOfTable(tickedRewardDistributions) > 0 then + msg.ioEvent:addField("Ticked-Reward-Distributions", tickedRewardDistributions) + 
msg.ioEvent:addField("Total-Ticked-Rewards-Distributed", totalTickedRewardsDistributed) + LastKnownCirculatingSupply = LastKnownCirculatingSupply + totalTickedRewardsDistributed + end + + local gwStats = gatewayStats() + msg.ioEvent:addField("Joined-Gateways-Count", gwStats.joined) + msg.ioEvent:addField("Leaving-Gateways-Count", gwStats.leaving) + addSupplyData(msg.ioEvent) + + -- Send a single tick notice to the sender after all epochs have been ticked + Send(msg, { + Target = msg.From, + Action = "Tick-Notice", + Data = json.encode({ + distributedEpochIndexes = distributedEpochIndexes, + newEpochIndexes = newEpochIndexes, + newDemandFactors = newDemandFactors, + newPruneGatewaysResults = newPruneGatewaysResults, + tickedRewardDistributions = tickedRewardDistributions, + totalTickedRewardsDistributed = totalTickedRewardsDistributed, + }), + }) + end, CRITICAL) + + -- READ HANDLERS + + addEventingHandler(ActionMap.Info, Handlers.utils.hasMatchingTag("Action", ActionMap.Info), function(msg) + local handlers = Handlers.list + local handlerNames = {} + + for _, handler in ipairs(handlers) do + table.insert(handlerNames, handler.name) + end + + local memoryKiBUsed = collectgarbage("count") + + Send(msg, { + Target = msg.From, + Action = "Info-Notice", + Tags = { + Name = Name, + Ticker = Ticker, + Logo = Logo, + Owner = Owner, + Denomination = tostring(Denomination), + LastCreatedEpochIndex = tostring(LastCreatedEpochIndex), + LastDistributedEpochIndex = tostring(LastDistributedEpochIndex), + Handlers = json.encode(handlerNames), + ["Memory-KiB-Used"] = tostring(memoryKiBUsed), + }, + Data = json.encode({ + Name = Name, + Ticker = Ticker, + Logo = Logo, + Owner = Owner, + Denomination = Denomination, + LastCreatedEpochIndex = LastCreatedEpochIndex, + LastDistributedEpochIndex = LastDistributedEpochIndex, + Handlers = handlerNames, + ["Memory-KiB-Used"] = memoryKiBUsed, + }), + }) + end) + + addEventingHandler(ActionMap.Gateway, Handlers.utils.hasMatchingTag("Action", 
ActionMap.Gateway), function(msg) + local gateway = gar.getCompactGateway(msg.Tags.Address or msg.From) + Send(msg, { + Target = msg.From, + Action = "Gateway-Notice", + Gateway = msg.Tags.Address or msg.From, + Data = json.encode(gateway), + }) + end) + + --- NOTE: this handler does not scale well, but various ecosystem apps rely on it (arconnect, ao.link, etc.) + addEventingHandler(ActionMap.Balances, Handlers.utils.hasMatchingTag("Action", ActionMap.Balances), function(msg) + Send(msg, { + Target = msg.From, + Action = "Balances-Notice", + Data = json.encode(Balances), + }) + end) + + addEventingHandler(ActionMap.Balance, Handlers.utils.hasMatchingTag("Action", ActionMap.Balance), function(msg) + local target = msg.Tags.Target or msg.Tags.Address or msg.Tags.Recipient or msg.From + local balance = balances.getBalance(target) + + -- must adhere to token.lua spec defined by https://github.com/permaweb/aos/blob/15dd81ee596518e2f44521e973b8ad1ce3ee9945/blueprints/token.lua + Send(msg, { + Target = msg.From, + Action = "Balance-Notice", + Account = target, + Data = tostring(balance), + Balance = tostring(balance), + Ticker = Ticker, + }) + end) + + addEventingHandler(ActionMap.DemandFactor, utils.hasMatchingTag("Action", ActionMap.DemandFactor), function(msg) + local demandFactor = demand.getDemandFactor() + Send(msg, { + Target = msg.From, + Action = "Demand-Factor-Notice", + Data = json.encode(demandFactor), + }) + end) + + addEventingHandler( + ActionMap.DemandFactorInfo, + utils.hasMatchingTag("Action", ActionMap.DemandFactorInfo), + function(msg) + local result = demand.getDemandFactorInfo() + Send(msg, { Target = msg.From, Action = "Demand-Factor-Info-Notice", Data = json.encode(result) }) + end + ) + + addEventingHandler(ActionMap.Record, utils.hasMatchingTag("Action", ActionMap.Record), function(msg) + local record = arns.getRecord(msg.Tags.Name) + + local recordNotice = { + Target = msg.From, + Action = "Record-Notice", + Name = msg.Tags.Name, 
+ Data = json.encode(record), + } + + -- Add forwarded tags to the credit and debit notice messages + for tagName, tagValue in pairs(msg) do + -- Tags beginning with "X-" are forwarded + if string.sub(tagName, 1, 2) == "X-" then + recordNotice[tagName] = tagValue + end + end + + -- Send Record-Notice + Send(msg, recordNotice) + end) + + addEventingHandler(ActionMap.Epoch, utils.hasMatchingTag("Action", ActionMap.Epoch), function(msg) + -- check if the epoch number is provided, if not get the epoch number from the timestamp + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) + local epoch = epochs.getEpoch(epochIndex) + if epoch then + -- populate the prescribed observers with weights for the epoch, this helps improve DX of downstream apps + epoch.prescribedObservers = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) + end + if epoch and epoch.distributions then + -- remove the distributions data from the epoch to avoid unbounded response payloads + epoch.distributions.rewards = nil + end + Send(msg, { Target = msg.From, Action = "Epoch-Notice", Data = json.encode(epoch) }) + end) + + addEventingHandler( + ActionMap.PrescribedObservers, + utils.hasMatchingTag("Action", ActionMap.PrescribedObservers), + function(msg) + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) + local prescribedObserversWithWeights = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) + Send(msg, { + Target = msg.From, + Action = "Prescribed-Observers-Notice", + Data = json.encode(prescribedObserversWithWeights), + }) + end + ) + + addEventingHandler(ActionMap.Observations, utils.hasMatchingTag("Action", ActionMap.Observations), function(msg) + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) + local observations = 
epochs.getObservationsForEpoch(epochIndex) + Send(msg, { + Target = msg.From, + Action = "Observations-Notice", + EpochIndex = tostring(epochIndex), + Data = json.encode(observations), + }) + end) + + addEventingHandler( + ActionMap.PrescribedNames, + utils.hasMatchingTag("Action", ActionMap.PrescribedNames), + function(msg) + -- check if the epoch number is provided, if not get the epoch number from the timestamp + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) + local prescribedNames = epochs.getPrescribedNamesForEpoch(epochIndex) + Send(msg, { + Target = msg.From, + Action = "Prescribed-Names-Notice", + Data = json.encode(prescribedNames), + }) + end + ) + + addEventingHandler(ActionMap.Distributions, utils.hasMatchingTag("Action", ActionMap.Distributions), function(msg) + -- check if the epoch number is provided, if not get the epoch number from the timestamp + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) + local distributions = epochs.getDistributionsForEpoch(epochIndex) + Send(msg, { + Target = msg.From, + Action = "Distributions-Notice", + Data = json.encode(distributions), + }) + end) + + addEventingHandler("epochRewards", utils.hasMatchingTag("Action", ActionMap.EpochRewards), function(msg) + local page = utils.parsePaginationTags(msg) + + local epochRewards = epochs.getEligibleRewardsForEpoch( + msg.Timestamp, + page.cursor, + page.limit, + page.sortBy or "cursorId", + page.sortOrder + ) + + Send(msg, { + Target = msg.From, + Action = "Epoch-Eligible-Rewards-Notice", + Data = json.encode(epochRewards), + }) + end) + + addEventingHandler("paginatedReservedNames", utils.hasMatchingTag("Action", ActionMap.ReservedNames), function(msg) + local page = utils.parsePaginationTags(msg) + local reservedNames = + arns.getPaginatedReservedNames(page.cursor, page.limit, page.sortBy or "name", 
page.sortOrder) + Send(msg, { Target = msg.From, Action = "Reserved-Names-Notice", Data = json.encode(reservedNames) }) + end) + + addEventingHandler(ActionMap.ReservedName, utils.hasMatchingTag("Action", ActionMap.ReservedName), function(msg) + local name = msg.Tags.Name and string.lower(msg.Tags.Name) + assert(name, "Name is required") + local reservedName = arns.getReservedName(name) + Send(msg, { + Target = msg.From, + Action = "Reserved-Name-Notice", + ReservedName = msg.Tags.Name, + Data = json.encode(reservedName), + }) + end) + + addEventingHandler(ActionMap.Vault, utils.hasMatchingTag("Action", ActionMap.Vault), function(msg) + local address = msg.Tags.Address or msg.From + local vaultId = msg.Tags["Vault-Id"] + local vault = vaults.getVault(address, vaultId) + assert(vault, "Vault not found") + Send(msg, { + Target = msg.From, + Action = "Vault-Notice", + Address = address, + ["Vault-Id"] = vaultId, + Data = json.encode(vault), + }) + end) + + -- Pagination handlers + + addEventingHandler("paginatedRecords", function(msg) + return msg.Action == "Paginated-Records" or msg.Action == ActionMap.Records + end, function(msg) + local page = utils.parsePaginationTags(msg) + local result = arns.getPaginatedRecords( + page.cursor, + page.limit, + page.sortBy or "startTimestamp", + page.sortOrder, + page.filters + ) + Send(msg, { Target = msg.From, Action = "Records-Notice", Data = json.encode(result) }) + end) + + addEventingHandler("paginatedGateways", function(msg) + return msg.Action == "Paginated-Gateways" or msg.Action == ActionMap.Gateways + end, function(msg) + local page = utils.parsePaginationTags(msg) + local result = + gar.getPaginatedGateways(page.cursor, page.limit, page.sortBy or "startTimestamp", page.sortOrder or "desc") + Send(msg, { Target = msg.From, Action = "Gateways-Notice", Data = json.encode(result) }) + end) + + addEventingHandler("paginatedBalances", utils.hasMatchingTag("Action", "Paginated-Balances"), function(msg) + local page = 
utils.parsePaginationTags(msg) + local walletBalances = + balances.getPaginatedBalances(page.cursor, page.limit, page.sortBy or "balance", page.sortOrder) + Send(msg, { Target = msg.From, Action = "Balances-Notice", Data = json.encode(walletBalances) }) + end) + + addEventingHandler("paginatedVaults", function(msg) + return msg.Action == "Paginated-Vaults" or msg.Action == ActionMap.Vaults + end, function(msg) + local page = utils.parsePaginationTags(msg) + local pageVaults = vaults.getPaginatedVaults(page.cursor, page.limit, page.sortOrder, page.sortBy) + Send(msg, { Target = msg.From, Action = "Vaults-Notice", Data = json.encode(pageVaults) }) + end) + + addEventingHandler("paginatedDelegates", function(msg) + return msg.Action == "Paginated-Delegates" or msg.Action == ActionMap.Delegates + end, function(msg) + local page = utils.parsePaginationTags(msg) + local result = gar.getPaginatedDelegates( + msg.Tags.Address or msg.From, + page.cursor, + page.limit, + page.sortBy or "startTimestamp", + page.sortOrder + ) + Send(msg, { Target = msg.From, Action = "Delegates-Notice", Data = json.encode(result) }) + end) + + addEventingHandler( + "paginatedAllowedDelegates", + utils.hasMatchingTag("Action", "Paginated-Allowed-Delegates"), + function(msg) + local page = utils.parsePaginationTags(msg) + local result = + gar.getPaginatedAllowedDelegates(msg.Tags.Address or msg.From, page.cursor, page.limit, page.sortOrder) + Send(msg, { Target = msg.From, Action = "Allowed-Delegates-Notice", Data = json.encode(result) }) + end + ) + + -- END READ HANDLERS + + addEventingHandler("releaseName", utils.hasMatchingTag("Action", ActionMap.ReleaseName), function(msg) + -- validate the name and process id exist, then create the returned name + local name = msg.Tags.Name and string.lower(msg.Tags.Name) + local processId = msg.From + local initiator = msg.Tags.Initiator or msg.From + + assert(name and #name > 0, "Name is required") -- this could be an undername, so we don't want to 
assertValidArNSName + assert(processId and utils.isValidAddress(processId, true), "Process-Id must be a valid address") + assert(initiator and utils.isValidAddress(initiator, true), "Initiator is required") + local record = arns.getRecord(name) + assert(record, "Record not found") + assert(record.type == "permabuy", "Only permabuy names can be released") + assert(record.processId == processId, "Process-Id mismatch") + assert( + #primaryNames.getPrimaryNamesForBaseName(name) == 0, + "Primary names are associated with this name. They must be removed before releasing the name." + ) + + -- we should be able to create the returned name here + local removedRecord = arns.removeRecord(name) + local removedPrimaryNamesAndOwners = primaryNames.removePrimaryNamesForBaseName(name) -- NOTE: this should be empty if there are no primary names allowed before release + local returnedName = arns.createReturnedName(name, msg.Timestamp, initiator) + local returnedNameData = { + removedRecord = removedRecord, + removedPrimaryNamesAndOwners = removedPrimaryNamesAndOwners, + returnedName = returnedName, + } + + addReturnedNameResultFields(msg.ioEvent, { + name = name, + returnedName = returnedNameData.returnedName, + removedRecord = returnedNameData.removedRecord, + removedPrimaryNamesAndOwners = returnedNameData.removedPrimaryNamesAndOwners, + }) + + -- note: no change to token supply here - only on buy record of returned name + msg.ioEvent:addField("Returned-Name-Count", utils.lengthOfTable(NameRegistry.returned)) + msg.ioEvent:addField("Records-Count", utils.lengthOfTable(NameRegistry.records)) + + local releaseNameData = { + name = name, + startTimestamp = returnedName.startTimestamp, + endTimestamp = returnedName.startTimestamp + constants.RETURNED_NAME_DURATION_MS, + initiator = returnedName.initiator, + } + + -- send to the initiator and the process that released the name + Send(msg, { + Target = initiator, + Action = "Returned-Name-Notice", + Name = name, + Data = 
json.encode(releaseNameData), + }) + Send(msg, { + Target = processId, + Action = "Returned-Name-Notice", + Name = name, + Data = json.encode(releaseNameData), + }) + end) + + addEventingHandler(ActionMap.ReturnedNames, utils.hasMatchingTag("Action", ActionMap.ReturnedNames), function(msg) + local page = utils.parsePaginationTags(msg) + local returnedNames = arns.getReturnedNamesUnsafe() + + --- @type ReturnedNameData[] -- Returned Names with End Timestamp and Premium Multiplier + local returnedNameDataArray = {} + + for _, v in pairs(returnedNames) do + table.insert(returnedNameDataArray, { + name = v.name, + startTimestamp = v.startTimestamp, + endTimestamp = v.startTimestamp + constants.RETURNED_NAME_DURATION_MS, + initiator = v.initiator, + premiumMultiplier = arns.getReturnedNamePremiumMultiplier(v.startTimestamp, msg.Timestamp), + }) + end + + -- paginate the returnedNames by name, showing returnedNames nearest to the endTimestamp first + local paginatedReturnedNames = utils.paginateTableWithCursor( + returnedNameDataArray, + page.cursor, + "name", + page.limit, + page.sortBy or "endTimestamp", + page.sortOrder or "asc" + ) + Send(msg, { + Target = msg.From, + Action = ActionMap.ReturnedNames .. "-Notice", + Data = json.encode(paginatedReturnedNames), + }) + end) + + addEventingHandler(ActionMap.ReturnedName, utils.hasMatchingTag("Action", ActionMap.ReturnedName), function(msg) + local name = string.lower(msg.Tags.Name) + local returnedName = arns.getReturnedNameUnsafe(name) + + assert(returnedName, "Returned name not found") + + Send(msg, { + Target = msg.From, + Action = ActionMap.ReturnedName .. 
"-Notice", + Data = json.encode({ + name = returnedName.name, + startTimestamp = returnedName.startTimestamp, + endTimestamp = returnedName.startTimestamp + constants.RETURNED_NAME_DURATION_MS, + initiator = returnedName.initiator, + premiumMultiplier = arns.getReturnedNamePremiumMultiplier(returnedName.startTimestamp, msg.Timestamp), + }), + }) + end) + + addEventingHandler("allowDelegates", utils.hasMatchingTag("Action", ActionMap.AllowDelegates), function(msg) + local allowedDelegates = msg.Tags["Allowed-Delegates"] + and utils.splitAndTrimString(msg.Tags["Allowed-Delegates"], ",") + assert(allowedDelegates and #allowedDelegates > 0, "Allowed-Delegates is required") + msg.ioEvent:addField("Input-New-Delegates-Count", utils.lengthOfTable(allowedDelegates)) + local result = gar.allowDelegates(allowedDelegates, msg.From) + + if result ~= nil then + msg.ioEvent:addField("New-Allowed-Delegates", result.newAllowedDelegates or {}) + msg.ioEvent:addField("New-Allowed-Delegates-Count", utils.lengthOfTable(result.newAllowedDelegates)) + msg.ioEvent:addField( + "Gateway-Total-Allowed-Delegates", + utils.lengthOfTable(result.gateway and result.gateway.settings.allowedDelegatesLookup or {}) + + utils.lengthOfTable(result.gateway and result.gateway.delegates or {}) + ) + end + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.AllowDelegates .. 
"-Notice" }, + Data = json.encode(result and result.newAllowedDelegates or {}), + }) + end) + + addEventingHandler("disallowDelegates", utils.hasMatchingTag("Action", ActionMap.DisallowDelegates), function(msg) + local disallowedDelegates = msg.Tags["Disallowed-Delegates"] + and utils.splitAndTrimString(msg.Tags["Disallowed-Delegates"], ",") + assert(disallowedDelegates and #disallowedDelegates > 0, "Disallowed-Delegates is required") + msg.ioEvent:addField("Input-Disallowed-Delegates-Count", utils.lengthOfTable(disallowedDelegates)) + local result = gar.disallowDelegates(disallowedDelegates, msg.From, msg.Id, msg.Timestamp) + if result ~= nil then + msg.ioEvent:addField("New-Disallowed-Delegates", result.removedDelegates or {}) + msg.ioEvent:addField("New-Disallowed-Delegates-Count", utils.lengthOfTable(result.removedDelegates)) + msg.ioEvent:addField( + "Gateway-Total-Allowed-Delegates", + utils.lengthOfTable(result.gateway and result.gateway.settings.allowedDelegatesLookup or {}) + + utils.lengthOfTable(result.gateway and result.gateway.delegates or {}) + ) + end + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.DisallowDelegates .. "-Notice" }, + Data = json.encode(result and result.removedDelegates or {}), + }) + end) + + addEventingHandler("paginatedDelegations", utils.hasMatchingTag("Action", "Paginated-Delegations"), function(msg) + local address = msg.Tags.Address or msg.From + local page = utils.parsePaginationTags(msg) + + assert(utils.isValidAddress(address, true), "Invalid address.") + + local result = gar.getPaginatedDelegations(address, page.cursor, page.limit, page.sortBy, page.sortOrder) + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.Delegations .. 
"-Notice" }, + Data = json.encode(result), + }) + end) + + addEventingHandler( + ActionMap.RedelegateStake, + utils.hasMatchingTag("Action", ActionMap.RedelegateStake), + function(msg) + local sourceAddress = msg.Tags.Source + local targetAddress = msg.Tags.Target + local delegateAddress = msg.From + local quantity = msg.Tags.Quantity or nil + local vaultId = msg.Tags["Vault-Id"] + + assert(utils.isValidAddress(sourceAddress, true), "Invalid source gateway address") + assert(utils.isValidAddress(targetAddress, true), "Invalid target gateway address") + assert(utils.isValidAddress(delegateAddress, true), "Invalid delegator address") + if vaultId then + assert(utils.isValidAddress(vaultId, true), "Invalid vault id") + end + + assert( + quantity and quantity > 0 and utils.isInteger(quantity), + "Invalid quantity. Must be integer greater than 0" + ) + local redelegationResult = gar.redelegateStake({ + sourceAddress = sourceAddress, + targetAddress = targetAddress, + delegateAddress = delegateAddress, + qty = quantity, + currentTimestamp = msg.Timestamp, + vaultId = vaultId, + }) + + local redelegationFee = redelegationResult.redelegationFee + local stakeMoved = quantity - redelegationFee + + local isStakeMovingFromDelegateToOperator = delegateAddress == targetAddress + local isStakeMovingFromOperatorToDelegate = delegateAddress == sourceAddress + local isStakeMovingFromWithdrawal = vaultId ~= nil + + --- Stake Direction Codings: + --- dw2o = Delegate Withdrawal to Operator Stake + --- d2o = Delegate Stake to Operator Stake + --- ow2d = Operator Withdrawal to Delegate Stake + --- o2d = Operator Stake to Delegate Stake + --- dw2d = Delegate Withdrawal to Other Delegate Stake + --- d2d = Delegate Stake to Other Delegate Stake + msg.ioEvent:addField( + "Stake-Direction", + isStakeMovingFromDelegateToOperator and (isStakeMovingFromWithdrawal and "dw2o" or "d2o") + or ( + isStakeMovingFromOperatorToDelegate and (isStakeMovingFromWithdrawal and "ow2d" or "o2d") + or 
(isStakeMovingFromWithdrawal and "dw2d" or "d2d") + ) + ) + + if isStakeMovingFromWithdrawal then + LastKnownWithdrawSupply = LastKnownWithdrawSupply - quantity + end + + if isStakeMovingFromDelegateToOperator then + if not isStakeMovingFromWithdrawal then + LastKnownDelegatedSupply = LastKnownDelegatedSupply - quantity + end + LastKnownStakedSupply = LastKnownStakedSupply + stakeMoved + elseif isStakeMovingFromOperatorToDelegate then + if not isStakeMovingFromWithdrawal then + LastKnownStakedSupply = LastKnownStakedSupply - quantity + end + LastKnownDelegatedSupply = LastKnownDelegatedSupply + stakeMoved + elseif isStakeMovingFromWithdrawal then + LastKnownStakedSupply = LastKnownStakedSupply + stakeMoved + else + LastKnownStakedSupply = LastKnownStakedSupply - redelegationFee + end + + if redelegationFee > 0 then + msg.ioEvent:addField("Redelegation-Fee", redelegationFee) + end + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { + Action = ActionMap.RedelegateStake .. "-Notice", + }, + Data = json.encode(redelegationResult), + }) + end + ) + + addEventingHandler( + ActionMap.RedelegationFee, + utils.hasMatchingTag("Action", ActionMap.RedelegationFee), + function(msg) + local delegateAddress = msg.Tags.Address or msg.From + assert(utils.isValidAddress(delegateAddress, true), "Invalid delegator address") + local feeResult = gar.getRedelegationFee(delegateAddress) + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.RedelegationFee .. 
"-Notice" }, + Data = json.encode(feeResult), + }) + end + ) + + --- PRIMARY NAMES + addEventingHandler("removePrimaryName", utils.hasMatchingTag("Action", ActionMap.RemovePrimaryNames), function(msg) + local names = utils.splitAndTrimString(msg.Tags.Names, ",") + assert(names and #names > 0, "Names are required") + assert(msg.From, "From is required") + local notifyOwners = msg.Tags["Notify-Owners"] and msg.Tags["Notify-Owners"] == "true" or false + + local removedPrimaryNamesAndOwners = primaryNames.removePrimaryNames(names, msg.From) + local removedPrimaryNamesCount = utils.lengthOfTable(removedPrimaryNamesAndOwners) + msg.ioEvent:addField("Num-Removed-Primary-Names", removedPrimaryNamesCount) + if removedPrimaryNamesCount > 0 then + msg.ioEvent:addField( + "Removed-Primary-Names", + utils.map(removedPrimaryNamesAndOwners, function(_, v) + return v.name + end) + ) + msg.ioEvent:addField( + "Removed-Primary-Name-Owners", + utils.map(removedPrimaryNamesAndOwners, function(_, v) + return v.owner + end) + ) + end + addPrimaryNameCounts(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Action = ActionMap.RemovePrimaryNames .. "-Notice", + Data = json.encode(removedPrimaryNamesAndOwners), + }) + + -- Send messages to the owners of the removed primary names if the notifyOwners flag is true + if notifyOwners then + for _, removedPrimaryNameAndOwner in pairs(removedPrimaryNamesAndOwners) do + Send(msg, { + Target = removedPrimaryNameAndOwner.owner, + Action = ActionMap.RemovePrimaryNames .. 
"-Notice", + Tags = { Name = removedPrimaryNameAndOwner.name }, + Data = json.encode(removedPrimaryNameAndOwner), + }) + end + end + end) + + addEventingHandler("requestPrimaryName", utils.hasMatchingTag("Action", ActionMap.RequestPrimaryName), function(msg) + local fundFrom = msg.Tags["Fund-From"] + local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil + local initiator = msg.From + assert(name, "Name is required") + assert(initiator, "Initiator is required") + assertValidFundFrom(fundFrom) + + local primaryNameResult = + primaryNames.createPrimaryNameRequest(name, initiator, msg.Timestamp, msg.Id, fundFrom) + + addPrimaryNameRequestData(msg.ioEvent, primaryNameResult) + + --- if the from is the new owner, then send an approved notice to the from + if primaryNameResult.newPrimaryName then + Send(msg, { + Target = msg.From, + Action = ActionMap.ApprovePrimaryNameRequest .. "-Notice", + Data = json.encode(primaryNameResult), + }) + return + end + + if primaryNameResult.request then + --- send a notice to the msg.From, and the base name owner + Send(msg, { + Target = msg.From, + Action = ActionMap.PrimaryNameRequest .. "-Notice", + Data = json.encode(primaryNameResult), + }) + Send(msg, { + Target = primaryNameResult.baseNameOwner, + Action = ActionMap.PrimaryNameRequest .. 
"-Notice", + Data = json.encode(primaryNameResult), + }) + end + end) + + addEventingHandler( + "approvePrimaryNameRequest", + utils.hasMatchingTag("Action", ActionMap.ApprovePrimaryNameRequest), + function(msg) + local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil + local recipient = msg.Tags.Recipient or msg.From + + assert(name, "Name is required") + assert(recipient, "Recipient is required") + assert(msg.From, "From is required") + + local approvedPrimaryNameResult = + primaryNames.approvePrimaryNameRequest(recipient, name, msg.From, msg.Timestamp) + addPrimaryNameRequestData(msg.ioEvent, approvedPrimaryNameResult) + + --- send a notice to the from + Send(msg, { + Target = msg.From, + Action = ActionMap.ApprovePrimaryNameRequest .. "-Notice", + Data = json.encode(approvedPrimaryNameResult), + }) + --- send a notice to the owner + Send(msg, { + Target = approvedPrimaryNameResult.newPrimaryName.owner, + Action = ActionMap.ApprovePrimaryNameRequest .. "-Notice", + Data = json.encode(approvedPrimaryNameResult), + }) + end + ) + + --- Handles forward and reverse resolutions (e.g. name -> address and address -> name) + addEventingHandler("getPrimaryNameData", utils.hasMatchingTag("Action", ActionMap.PrimaryName), function(msg) + local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil + local address = msg.Tags.Address or msg.From + local primaryNameData = name and primaryNames.getPrimaryNameDataWithOwnerFromName(name) + or address and primaryNames.getPrimaryNameDataWithOwnerFromAddress(address) + assert(primaryNameData, "Primary name data not found") + return Send(msg, { + Target = msg.From, + Action = ActionMap.PrimaryName .. 
"-Notice", + Tags = { Owner = primaryNameData.owner, Name = primaryNameData.name }, + Data = json.encode(primaryNameData), + }) + end) + + addEventingHandler( + "getPrimaryNameRequest", + utils.hasMatchingTag("Action", ActionMap.PrimaryNameRequest), + function(msg) + local initiator = msg.Tags.Initiator or msg.From + local result = primaryNames.getPrimaryNameRequest(initiator) + assert(result, "Primary name request not found for " .. initiator) + return Send(msg, { + Target = msg.From, + Action = ActionMap.PrimaryNameRequests .. "-Notice", + Data = json.encode({ + name = result.name, + startTimestamp = result.startTimestamp, + endTimestamp = result.endTimestamp, + initiator = initiator, + }), + }) + end + ) + + addEventingHandler( + "getPaginatedPrimaryNameRequests", + utils.hasMatchingTag("Action", ActionMap.PrimaryNameRequests), + function(msg) + local page = utils.parsePaginationTags(msg) + local result = primaryNames.getPaginatedPrimaryNameRequests( + page.cursor, + page.limit, + page.sortBy or "startTimestamp", + page.sortOrder or "asc" + ) + return Send(msg, { + Target = msg.From, + Action = ActionMap.PrimaryNameRequests .. "-Notice", + Data = json.encode(result), + }) + end + ) + + addEventingHandler("getPaginatedPrimaryNames", utils.hasMatchingTag("Action", ActionMap.PrimaryNames), function(msg) + local page = utils.parsePaginationTags(msg) + local result = primaryNames.getPaginatedPrimaryNames( + page.cursor, + page.limit, + page.sortBy or "name", + page.sortOrder or "asc" + ) + + return Send(msg, { + Target = msg.From, + Action = ActionMap.PrimaryNames .. 
"-Notice", + Data = json.encode(result), + }) + end) + + addEventingHandler( + "getPaginatedGatewayVaults", + utils.hasMatchingTag("Action", "Paginated-Gateway-Vaults"), + function(msg) + local page = utils.parsePaginationTags(msg) + local gatewayAddress = utils.formatAddress(msg.Tags.Address or msg.From) + assert(utils.isValidAddress(gatewayAddress, true), "Invalid gateway address") + local result = gar.getPaginatedVaultsForGateway( + gatewayAddress, + page.cursor, + page.limit, + page.sortBy or "endTimestamp", + page.sortOrder or "desc" + ) + return Send(msg, { + Target = msg.From, + Action = "Gateway-Vaults-Notice", + Data = json.encode(result), + }) + end + ) + + addEventingHandler("getPruningTimestamps", utils.hasMatchingTag("Action", "Pruning-Timestamps"), function(msg) + addNextPruneTimestampsData(msg.ioEvent) + return Send(msg, { + Target = msg.From, + Action = "Pruning-Timestamps-Notice", + Data = json.encode({ + returnedNames = arns.nextReturnedNamesPruneTimestamp(), + gateways = gar.nextGatewaysPruneTimestamp(), + primaryNames = primaryNames.nextPrimaryNamesPruneTimestamp(), + records = arns.nextRecordsPruneTimestamp(), + redelegations = gar.nextRedelegationsPruneTimestamp(), + vaults = vaults.nextVaultsPruneTimestamp(), + }), + }) + end) + + addEventingHandler("allPaginatedDelegates", utils.hasMatchingTag("Action", "All-Paginated-Delegates"), function(msg) + local page = utils.parsePaginationTags(msg) + local result = gar.getPaginatedDelegatesFromAllGateways(page.cursor, page.limit, page.sortBy, page.sortOrder) + Send(msg, { Target = msg.From, Action = "All-Delegates-Notice", Data = json.encode(result) }) + end) + + addEventingHandler("allPaginatedGatewayVaults", utils.hasMatchingTag("Action", "All-Gateway-Vaults"), function(msg) + local page = utils.parsePaginationTags(msg) + local result = gar.getPaginatedVaultsFromAllGateways(page.cursor, page.limit, page.sortBy, page.sortOrder) + Send(msg, { Target = msg.From, Action = "All-Gateway-Vaults-Notice", 
Data = json.encode(result) }) + end) + + addEventingHandler(ActionMap.PatchHyperbeamBalances, function(msg) + if msg.Tags.Action == ActionMap.PatchHyperbeamBalances then + return "continue" + end + return false + end, function(msg) + assert(msg.From == Owner, "Only the owner can trigger " .. ActionMap.PatchHyperbeamBalances) + + local patchBalances = {} + for address, balance in pairs(Balances) do + patchBalances[address] = tostring(balance) + end + + local patchMessage = { device = "patch@1.0", balances = patchBalances } + ao.send(patchMessage) + + return Send(msg, { + Target = msg.From, + Action = ActionMap.PatchHyperbeamBalances .. "-Notice", + }) + end) + + return main +end + +_G.package.loaded[".src.main"] = _loaded_mod_src_main() + +-- Initialize the HB balances state + +local patchBalances = {} +for address, balance in pairs(Balances) do + patchBalances[address] = tostring(balance) +end +ao.send({ + device = "patch@1.0", + balances = patchBalances, +}) diff --git a/src/hb.lua b/src/hb.lua new file mode 100644 index 00000000..3a3ec296 --- /dev/null +++ b/src/hb.lua @@ -0,0 +1,41 @@ +-- hb.lua needs to be in its own file and not in balances.lua to avoid circular dependencies +local hb = {} + +---@param oldBalances table A table of addresses and their balances +---@return table affectedBalancesAddresses table of addresses that have had balance changes +function hb.patchBalances(oldBalances) + assert(type(oldBalances) == "table", "Old balances must be a table") + local affectedBalancesAddresses = {} + for address, _ in pairs(oldBalances) do + if Balances[address] ~= oldBalances[address] then + affectedBalancesAddresses[address] = true + end + end + for address, _ in pairs(Balances) do + if oldBalances[address] ~= Balances[address] then + affectedBalancesAddresses[address] = true + end + end + + --- For simplicity we always include the protocol balance in the patch message + --- this also prevents us from sending an empty patch message and deleting the entire 
hyperbeam balances table\ + + local patchMessage = { + device = "patch@1.0", + balances = { [ao.id] = tostring(Balances[ao.id] or 0) }, + } + for address, _ in pairs(affectedBalancesAddresses) do + patchMessage.balances[address] = tostring(Balances[address] or 0) + end + + -- only send the patch message if there are affected balances, otherwise we'll end up deleting the entire hyperbeam balances table + if next(patchMessage.balances) == nil then + return {} + else + ao.send(patchMessage) + end + + return affectedBalancesAddresses +end + +return hb diff --git a/src/main.lua b/src/main.lua index f743b9cf..7b8e675d 100644 --- a/src/main.lua +++ b/src/main.lua @@ -4,6 +4,7 @@ local token = require(".src.token") local utils = require(".src.utils") local json = require(".src.json") local balances = require(".src.balances") +local hb = require(".src.hb") local arns = require(".src.arns") local gar = require(".src.gar") local demand = require(".src.demand") @@ -90,6 +91,8 @@ local ActionMap = { ApprovePrimaryNameRequest = "Approve-Primary-Name-Request", PrimaryNames = "Primary-Names", PrimaryName = "Primary-Name", + -- Hyperbeam Patch Balances + PatchHyperbeamBalances = "Patch-Hyperbeam-Balances", } --- @param msg ParsedMessage @@ -471,6 +474,12 @@ local function addEventingHandler(handlerName, pattern, handleFn, critical, prin critical = critical or false printEvent = printEvent == nil and true or printEvent Handlers.add(handlerName, pattern, function(msg) + -- Store the old balances to compare after the handler has run for patching state + -- Only do this for the last handler to avoid unnecessary copying + local oldBalances = nil + if pattern(msg) ~= "continue" then + oldBalances = utils.deepCopy(Balances) + end -- add an ARIOEvent to the message if it doesn't exist msg.ioEvent = msg.ioEvent or ARIOEvent(msg) -- global handler for all eventing errors, so we can log them and send a notice to the sender for non critical errors and discard the memory on critical errors @@ 
-494,6 +503,11 @@ local function addEventingHandler(handlerName, pattern, handleFn, critical, prin error(errorWithEvent, 0) -- 0 ensures not to include this line number in the error message end + -- Send patch message to HB + if oldBalances then + hb.patchBalances(oldBalances) + end + msg.ioEvent:addField("Handler-Memory-KiB-Used", collectgarbage("count"), false) collectgarbage("collect") msg.ioEvent:addField("Final-Memory-KiB-Used", collectgarbage("count"), false) @@ -2137,17 +2151,12 @@ end) -- Pagination handlers addEventingHandler("paginatedRecords", function(msg) - return msg.Action == "Paginated-Records" or msg.Action == ActionMap.Records + return msg.Action == "Paginated-Records" or msg.Action == ActionMap.Records end, function(msg) - local page = utils.parsePaginationTags(msg) - local result = arns.getPaginatedRecords( - page.cursor, - page.limit, - page.sortBy or "startTimestamp", - page.sortOrder, - page.filters - ) - Send(msg, { Target = msg.From, Action = "Records-Notice", Data = json.encode(result) }) + local page = utils.parsePaginationTags(msg) + local result = + arns.getPaginatedRecords(page.cursor, page.limit, page.sortBy or "startTimestamp", page.sortOrder, page.filters) + Send(msg, { Target = msg.From, Action = "Records-Notice", Data = json.encode(result) }) end) addEventingHandler("paginatedGateways", function(msg) @@ -2691,4 +2700,26 @@ addEventingHandler("allPaginatedGatewayVaults", utils.hasMatchingTag("Action", " Send(msg, { Target = msg.From, Action = "All-Gateway-Vaults-Notice", Data = json.encode(result) }) end) +addEventingHandler(ActionMap.PatchHyperbeamBalances, function(msg) + if msg.Tags.Action == ActionMap.PatchHyperbeamBalances then + return "continue" + end + return false +end, function(msg) + assert(msg.From == Owner, "Only the owner can trigger " .. 
ActionMap.PatchHyperbeamBalances) + + local patchBalances = {} + for address, balance in pairs(Balances) do + patchBalances[address] = tostring(balance) + end + + local patchMessage = { device = "patch@1.0", balances = patchBalances } + ao.send(patchMessage) + + return Send(msg, { + Target = msg.From, + Action = ActionMap.PatchHyperbeamBalances .. "-Notice", + }) +end) + return main diff --git a/tests/arns.test.mjs b/tests/arns.test.mjs index 146c149c..f9650fc9 100644 --- a/tests/arns.test.mjs +++ b/tests/arns.test.mjs @@ -819,7 +819,7 @@ describe('ArNS', async () => { }); // should send three messages including a Buy-Name-Notice and a Debit-Notice - assert.equal(newBuyResult.Messages.length, 2); + assert.equal(newBuyResult.Messages.length, 3); // should send a buy record notice const buyRecordNoticeTag = newBuyResult.Messages?.[0]?.Tags?.find( @@ -1030,7 +1030,7 @@ describe('ArNS', async () => { }); // should send three messages including a Buy-Name-Notice and a Debit-Notice - assert.equal(buyReturnedNameResult.Messages.length, 2); + assert.equal(buyReturnedNameResult.Messages.length, 3); // should send a buy record notice const buyRecordNoticeTag = diff --git a/tests/code-patches/2025-10-07-hb-balances-patch/CWxzoe4IoNpFHiykadZWphZtLWybDF8ocNi7gmK6zCg.wasm b/tests/code-patches/2025-10-07-hb-balances-patch/CWxzoe4IoNpFHiykadZWphZtLWybDF8ocNi7gmK6zCg.wasm new file mode 100644 index 00000000..1f2eefdd Binary files /dev/null and b/tests/code-patches/2025-10-07-hb-balances-patch/CWxzoe4IoNpFHiykadZWphZtLWybDF8ocNi7gmK6zCg.wasm differ diff --git a/tests/code-patches/2025-10-07-hb-balances-patch/index.test.mjs b/tests/code-patches/2025-10-07-hb-balances-patch/index.test.mjs new file mode 100644 index 00000000..a159083b --- /dev/null +++ b/tests/code-patches/2025-10-07-hb-balances-patch/index.test.mjs @@ -0,0 +1,336 @@ +import fs from 'node:fs'; +import path from 'node:path'; +import { describe, it } from 'node:test'; +import assert from 'node:assert'; +import { 
assertNoResultError, createAosLoader } from '../../utils.mjs'; +import { + AO_LOADER_HANDLER_ENV, + DEFAULT_HANDLE_OPTIONS, + INITIAL_OPERATOR_STAKE, + STUB_ADDRESS, + STUB_BLOCK_HEIGHT, + STUB_HASH_CHAIN, + STUB_TIMESTAMP, +} from '../../../tools/constants.mjs'; + +const __dirname = path.dirname(new URL(import.meta.url).pathname); + +const mainnetLuaFile = fs.readFileSync( + path.join(__dirname, 'main-aos-bundled.lua'), + { encoding: 'utf-8' }, +); + +const patchFile = fs.readFileSync( + path.join(__dirname, '../../../patches/2025-10-07-hb-balances-patch.lua'), + { encoding: 'utf-8' }, +); + +const wasmMemory = fs.readFileSync( + path.join( + __dirname, + 'qNvAoz0TgcH7DMg8BCVn8jF32QH5L6T29VjHxhHqqGE-2025-10-07-wasm-memory', + ), +); + +const wasm = fs.readFileSync( + path.join(__dirname, 'CWxzoe4IoNpFHiykadZWphZtLWybDF8ocNi7gmK6zCg.wasm'), +); + +const { handle: originalHandle, memory } = await createAosLoader({ + wasm, + lua: mainnetLuaFile, +}); +// We are using new helpers here because our other integration tests are using a different module id. 
+const startMemory = memory; + +/** + * + * @param {{ + * options: Object, + * memory: WebAssembly.Memory, + * shouldAssertNoResultError: boolean + * }} options + * @returns {Promise} + */ +async function handle({ + options = {}, + memory = startMemory, + shouldAssertNoResultError = true, + timestamp = Date.now().toString(), + blockHeight = STUB_BLOCK_HEIGHT, + hashchain = STUB_HASH_CHAIN, +}) { + options.Timestamp ??= timestamp; + options['Block-Height'] ??= blockHeight; + options['Hash-Chain'] ??= hashchain; + const result = await originalHandle( + memory, + { + ...DEFAULT_HANDLE_OPTIONS, + ...options, + Target: 'qNvAoz0TgcH7DMg8BCVn8jF32QH5L6T29VjHxhHqqGE', + }, + { + ...AO_LOADER_HANDLER_ENV, + Process: { + ...AO_LOADER_HANDLER_ENV.Process, + Id: 'qNvAoz0TgcH7DMg8BCVn8jF32QH5L6T29VjHxhHqqGE', + Tags: [ + ...AO_LOADER_HANDLER_ENV.Process.Tags, + { + name: 'Authority', + value: 'fcoN_xJeisVsPXA-trzVAuIiqO3ydLQxM-L4XbrQKzY', + }, + ], + }, + }, + ); + if (shouldAssertNoResultError) { + assertNoResultError(result); + } + return result; +} + +const processOwner = 'My21NOHZyyeQG0t0yANsWjRakNDM7CJvd8urtdMLEDE'; +const processId = 'qNvAoz0TgcH7DMg8BCVn8jF32QH5L6T29VjHxhHqqGE'; +const authority = 'fcoN_xJeisVsPXA-trzVAuIiqO3ydLQxM-L4XbrQKzY'; + +const transfer = async ({ + recipient = STUB_ADDRESS, + quantity = INITIAL_OPERATOR_STAKE, + memory = startMemory, + cast = false, + timestamp = STUB_TIMESTAMP, +} = {}) => { + if (quantity === 0) { + // Nothing to do + return memory; + } + + const transferResult = await handle({ + options: { + From: processId, + Owner: authority, + Tags: [ + { name: 'Action', value: 'Transfer' }, + { name: 'Recipient', value: recipient }, + { name: 'Quantity', value: quantity }, + { name: 'Cast', value: cast }, + ], + Timestamp: timestamp, + }, + memory, + }); + assertNoResultError(transferResult); + return transferResult.Memory; +}; + +export const getBalances = async ({ memory, timestamp = STUB_TIMESTAMP }) => { + // assert(memory, 'Memory is 
required'); + const result = await handle({ + options: { + From: processId, + Owner: authority, + Tags: [{ name: 'Action', value: 'Balances' }], + }, + timestamp, + memory, + }); + + const balancesData = result.Messages?.[0]?.Data; + if (!balancesData) { + const { Memory, ...rest } = result; + assert(false, `Something went wrong: ${JSON.stringify(rest, null, 2)}`); + } + const balances = JSON.parse(result.Messages?.[0]?.Data); + return balances; +}; + +describe('2025-10-07-hb-balances-patch', () => { + it('should eval the patch file', async () => { + const { memory: evalPatchMemory, ...rest } = await handle({ + options: { + From: processOwner, + Owner: processOwner, + Tags: [{ name: 'Action', value: 'Eval' }], + Data: patchFile, + }, + memory: wasmMemory, + }); + + console.dir(rest, { depth: null }); + + const balances = await handle({ + options: { + From: processOwner, + Owner: processOwner, + Tags: [{ name: 'Action', value: 'Balances' }], + }, + memory: evalPatchMemory, + }); + + console.dir(balances, { depth: null }); + }); + + // describe('hyperbeam patch balances', async () => { + // it('should handle sending a patch to a newly created address', async () => { + // const sender = STUB_ADDRESS; + // const recipient = ''.padEnd(43, 'a'); + // const quantity = 100000000; + // const transferToSenderAddressMemory = await transfer({ + // recipient: sender, + // quantity, + // }); + // const transferToRecipientAddress = await handle({ + // options: { + // From: sender, + // Owner: sender, + // Tags: [ + // { name: 'Action', value: 'Transfer' }, + // { name: 'Recipient', value: recipient }, + // { name: 'Quantity', value: String(quantity / 2) }, + // ], + // Timestamp: STUB_TIMESTAMP, + // }, + // memory: transferToSenderAddressMemory, + // }); + // console.dir(transferToRecipientAddress, { depth: null }); + // const patchMessage = transferToRecipientAddress.Messages.at(-1); + // const patchData = patchMessage.Tags.find( + // (tag) => tag.name === 'balances', + // 
).value; + // assert.equal(patchData[sender], quantity / 2); + // assert.equal(patchData[recipient], quantity / 2); + // }); + + // it('should handle sending a patch that drains an address', async () => { + // const sender = STUB_ADDRESS; + // const recipient = ''.padEnd(43, 'a'); + // const quantity = 100000000; + // const transferToSenderAddressMemory = await transfer({ + // recipient: sender, + // quantity, + // }); + // const balancesAfterTransfer = await getBalances({ + // memory: transferToSenderAddressMemory, + // }); + // console.dir(balancesAfterTransfer, { depth: null }); + // const transferToRecipientAddress = await handle({ + // options: { + // From: sender, + // Owner: sender, + // Tags: [ + // { name: 'Action', value: 'Transfer' }, + // { name: 'Recipient', value: recipient }, + // { name: 'Quantity', value: String(quantity / 2) }, + // ], + // Timestamp: STUB_TIMESTAMP, + // }, + // memory: transferToSenderAddressMemory, + // }); + // const balancesAfterTransferToRecipient = await getBalances({ + // memory: transferToRecipientAddress.Memory, + // }); + // const patchMessage = transferToRecipientAddress.Messages.at(-1); + // const patchData = patchMessage.Tags.find( + // (tag) => tag.name === 'balances', + // ).value; + // assert.equal(patchData[sender], quantity / 2); + // assert.equal(patchData[recipient], quantity / 2); + + // const transferToDrainerAddress = await handle({ + // options: { + // From: sender, + // Owner: sender, + // Tags: [ + // { name: 'Action', value: 'Transfer' }, + // { name: 'Recipient', value: recipient }, + // { name: 'Quantity', value: String(quantity / 2) }, + // ], + // Timestamp: STUB_TIMESTAMP, + // }, + // memory: transferToRecipientAddress.Memory, + // }); + // const balancesAfterDrain = await getBalances({ + // memory: transferToDrainerAddress.Memory, + // }); + + // const patchMessage2 = transferToDrainerAddress.Messages.at(-1); + // const patchData2 = patchMessage2.Tags.find( + // (tag) => tag.name === 'balances', 
+ // ).value; + // assert.equal(patchData2[sender], 0); + // assert.equal(patchData2[recipient], quantity); + // }); + + // it('should handle sending a patch when an address is removed from balances', async () => { + // const sender = STUB_ADDRESS; + // const recipient = ''.padEnd(43, 'a'); + // const quantity = 100000000; + // const transferToSenderAddressMemory = await transfer({ + // recipient: sender, + // quantity, + // }); + // const transferToRecipientAddress = await handle({ + // options: { + // From: sender, + // Owner: sender, + // Tags: [ + // { name: 'Action', value: 'Transfer' }, + // { name: 'Recipient', value: recipient }, + // { name: 'Quantity', value: String(quantity / 2) }, + // ], + // Timestamp: STUB_TIMESTAMP, + // }, + // memory: transferToSenderAddressMemory, + // }); + // const patchMessage = transferToRecipientAddress.Messages.at(-1); + // const patchData = patchMessage.Tags.find( + // (tag) => tag.name === 'balances', + // ).value; + // assert.equal(patchData[sender], quantity / 2); + // assert.equal(patchData[recipient], quantity / 2); + + // const transferToDrainerAddress = await handle({ + // options: { + // From: sender, + // Owner: sender, + // Tags: [ + // { name: 'Action', value: 'Transfer' }, + // { name: 'Recipient', value: recipient }, + // { name: 'Quantity', value: String(quantity / 2) }, + // ], + // Timestamp: STUB_TIMESTAMP, + // }, + // memory: transferToRecipientAddress.Memory, + // }); + + // const patchMessage2 = transferToDrainerAddress.Messages.at(-1); + // const patchData2 = patchMessage2.Tags.find( + // (tag) => tag.name === 'balances', + // ).value; + // assert.equal(patchData2[sender], 0); + // assert.equal(patchData2[recipient], quantity); + + // const balancesBeforeCleanup = await getBalances({ + // memory: transferToDrainerAddress.Memory, + // }); + + // const tokenSupplyRes = await handle({ + // options: { + // Tags: [{ name: 'Action', value: 'Total-Supply' }], + // }, + // memory: 
transferToDrainerAddress.Memory, + // }); + // const balancesAfterCleanup = await getBalances({ + // memory: tokenSupplyRes.Memory, + // }); + + // const patchMessage3 = tokenSupplyRes.Messages.at(-1); + // const patchData3 = patchMessage3.Tags.find( + // (tag) => tag.name === 'balances', + // ).value; + // assert.equal(patchData3[sender], 0); + // }); + // }); +}); diff --git a/tests/code-patches/2025-10-07-hb-balances-patch/main-aos-bundled.lua b/tests/code-patches/2025-10-07-hb-balances-patch/main-aos-bundled.lua new file mode 100644 index 00000000..d5586f71 --- /dev/null +++ b/tests/code-patches/2025-10-07-hb-balances-patch/main-aos-bundled.lua @@ -0,0 +1,11341 @@ +--[[ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". 
"Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published
+    by the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. + +]] + +-- module: ".src.constants" +local function _loaded_mod_src_constants() + --[[ + NOTE: constants is used throughout the codebase, so avoid imports of any modules in this file to prevent circular dependencies +]] + -- + local constants = {} + + --- Process constants + constants.LOGO = "qUjrTmHdVjXX4D6rU6Fik02bUOzWkOR6oOqUg39g4-s" + constants.TICKER = "ARIO" + constants.NAME = "ARIO" + constants.DENOMINATION = 6 + + -- intentionally not exposed so all callers use ARIOToMARIO for consistency + local mARIO_PER_ARIO = 10 ^ constants.DENOMINATION -- 1 million mARIO per ARIO + + --- @param ARIO number + --- @return mARIO mARIO the amount of mario for the given ARIO + function constants.ARIOToMARIO(ARIO) + return math.floor(ARIO * mARIO_PER_ARIO) + end + + --- @param days number + --- @return number milliseconds the number of days in milliseconds + function constants.daysToMs(days) + return days * constants.hoursToMs(24) + end + + --- @param minutes number + --- @return number milliseconds the number of minutes in milliseconds + function constants.minutesToMs(minutes) + return minutes * constants.secondsToMs(60) + end + + function constants.secondsToMs(seconds) + return seconds * 1000 + end + + --- @param years number + --- @return number milliseconds the number of years in milliseconds + function constants.yearsToMs(years) + return years * constants.daysToMs(365) + end + + function constants.hoursToMs(hours) + return hours * constants.minutesToMs(60) + end + + -- TOKEN SUPPLY + constants.TOTAL_TOKEN_SUPPLY = constants.ARIOToMARIO(10 ^ 9) -- 1 billion tokens + 
constants.DEFAULT_PROTOCOL_BALANCE = constants.ARIOToMARIO(65 * (10 ^ 6)) -- 65M ARIO
constants.MIN_UNSAFE_ADDRESS_LENGTH = 1
constants.MAX_UNSAFE_ADDRESS_LENGTH = 128

-- EPOCHS
constants.DEFAULT_EPOCH_SETTINGS = {
	prescribedNameCount = 2, -- number of names prescribed to observers each epoch
	maxObservers = 50,
	epochZeroStartTimestamp = 1741176000000, -- March 5, 2025 12:00:00 UTC (7AM EST)
	durationMs = constants.daysToMs(1), -- each epoch lasts one day
}

-- DISTRIBUTIONS
--[[
	Distribution rewards will be 0.1% of the protocol balance for the first year,
	then decay linearly to 0.05% of the protocol balance after the first year until 1.5 years.
	After 1.5 years, the reward rate will be 0.05% of the protocol balance.
]]
constants.DEFAULT_DISTRIBUTION_SETTINGS = {
	maximumRewardRate = 0.001, -- 0.1% of the protocol balance for the first year
	minimumRewardRate = 0.0005, -- 0.05% of the protocol balance after the first year
	rewardDecayStartEpoch = 365, -- one year of epochs before decay kicks in
	rewardDecayLastEpoch = 547, -- 1.5 years of epochs before decay stops (365 + 182)
	gatewayOperatorRewardRate = 0.9, -- (90%) share of rewards that goes to gateway operators
	observerRewardRate = 0.1, -- (10%) share of rewards that goes to observers
	missedObservationPenaltyRate = 0.25, -- (25%) penalty for gateways that receive rewards but did not observe
}

-- Gateway Address Registry
constants.MIN_WITHDRAWAL_AMOUNT = constants.ARIOToMARIO(1)
constants.DEFAULT_GAR_SETTINGS = {
	observers = {
		tenureWeightDurationMs = constants.daysToMs(180), -- 180 days in ms
		maxTenureWeight = 4, -- maximum tenure weight, reached when a gateway has been running for 2 years or more
	},
	operators = {
		minStake = constants.ARIOToMARIO(10000), -- 10,000 ARIO
		withdrawLengthMs = constants.daysToMs(90), -- 90 days to lower operator stake
		leaveLengthMs = constants.daysToMs(90), -- 90 days that the balance will be vaulted
		failedEpochCountMax = 30, -- number of failed epochs before a gateway is marked as leaving
		failedGatewaySlashRate = 1, -- (100%) applied to the minimum operator stake, the rest is vaulted
		maxDelegateRewardSharePct = 95, -- (95%) maximum percentage of rewards shareable with delegates, intentionally a percentage (vs. rate)
	},
	delegates = {
		minStake = constants.ARIOToMARIO(10), -- 10 ARIO
		withdrawLengthMs = constants.daysToMs(90), -- 90 days once withdraw is requested, subject to redelegation rules and early-withdrawal penalties
	},
	redelegations = {
		minRedelegationPenaltyRate = 0.10, -- (10%) minimum penalty rate for a redelegation
		maxRedelegationPenaltyRate = 0.60, -- (60%) maximum penalty rate for a redelegation
		minRedelegationAmount = constants.MIN_WITHDRAWAL_AMOUNT, -- minimum amount that can be redelegated
		redelegationFeeResetIntervalMs = constants.daysToMs(7), -- 7 days
	},
	expeditedWithdrawals = {
		minExpeditedWithdrawalPenaltyRate = 0.10, -- (10%) minimum penalty rate for an expedited withdrawal
		maxExpeditedWithdrawalPenaltyRate = 0.50, -- (50%) maximum penalty rate for an expedited withdrawal
		minExpeditedWithdrawalAmount = constants.MIN_WITHDRAWAL_AMOUNT, -- minimum amount that can be expedited
	},
}

-- VAULTS
constants.MIN_VAULT_SIZE = constants.ARIOToMARIO(100) -- 100 ARIO - primarily to avoid state bloat and encourage consolidated vaults
constants.MAX_TOKEN_LOCK_TIME_MS = constants.yearsToMs(200) -- the maximum time tokens can be locked in a vault (200 years)
constants.MIN_TOKEN_LOCK_TIME_MS = constants.daysToMs(14) -- the minimum time tokens can be locked in a vault (14 days)

-- ARNS
constants.DEFAULT_UNDERNAME_COUNT = 10
constants.MAX_BASE_NAME_LENGTH = 51 -- gateways utilize sandbox subdomains of 52 characters in length; this reduces sandboxing complexity
constants.MAX_PRIMARY_NAME_LENGTH = 63 -- the max length of a domain label (any part separated by dots)
constants.MAX_UNDERNAME_LENGTH = 61 -- MAX_PRIMARY_NAME_LENGTH - 1 (for underscore) - 1 (minimum base name length)
constants.MIN_NAME_LENGTH = 1
-- Regex patterns to validate ArNS names:
-- - start with an alphanumeric character (%w)
-- - may contain alphanumeric characters and hyphens (%w-)
-- - end with an alphanumeric character (%w)
-- - names may not start or end with a hyphen
constants.ARNS_NAME_SINGLE_CHAR_REGEX = "^%w$"
constants.ARNS_NAME_MULTICHARACTER_REGEX = "^%w[%w-]*%w$"
constants.UNDERNAME_REGEX = "^%w+[%w_-]*$"
constants.PERMABUY_LEASE_FEE_LENGTH_YEARS = 20 -- buying a permabuy record is priced like leasing the name for 20 years
constants.ANNUAL_PERCENTAGE_FEE = 0.2 -- the fee applied for leases against the base name
constants.UNDERNAME_LEASE_FEE_PERCENTAGE = 0.001 -- for leased names the undername fee is 0.1% for one undername
constants.UNDERNAME_PERMABUY_FEE_PERCENTAGE = 0.005 -- for permabuy names the undername fee is 0.5% for one undername
constants.GRACE_PERIOD_DURATION_MS = constants.daysToMs(14) -- the grace period for expired names
constants.MAX_LEASE_LENGTH_YEARS = 5 -- the maximum number of years a name can be leased for
-- the returned period for names that expired beyond their grace period or were manually returned,
-- during which a purchase-price multiplier applies
constants.RETURNED_NAME_DURATION_MS = constants.daysToMs(14)
constants.RETURNED_NAME_MAX_MULTIPLIER = 50 -- freshly returned names start at a 50x multiplier
constants.PRIMARY_NAME_REQUEST_DEFAULT_NAME_LENGTH = 51 -- primary name requests cost the same as a single undername on a 51 character name
constants.PRIMARY_NAME_REQUEST_DURATION_MS = constants.daysToMs(7) -- the duration of a primary name request
constants.GATEWAY_OPERATOR_ARNS_DISCOUNT_PERCENTAGE = 0.2 -- operator discount applied to ArNS requests
-- the tenure weight threshold for ArNS-discount eligibility (operator for ~6 months to qualify)
constants.GATEWAY_OPERATOR_ARNS_DISCOUNT_TENURE_WEIGHT_ELIGIBILITY_THRESHOLD = 1
-- the gateway performance ratio threshold for ArNS-discount eligibility
constants.GATEWAY_OPERATOR_ARNS_DISCOUNT_PERFORMANCE_RATIO_ELIGIBILITY_THRESHOLD = 0.90 -- gateway must achieve a 90% performance ratio to qualify
constants.GATEWAY_OPERATOR_ARNS_DISCOUNT_NAME = "Gateway Operator ArNS Discount" -- the name of the discount applied to ArNS requests

-- Genesis Fees -- Mainnet
-- Characters (ARIO) (mARIO)
-- 1 1,000,000 1,000,000,000,000
-- 2 200,000 200,000,000,000
-- 3 20,000 20,000,000,000
-- 4 10,000 10,000,000,000
-- 5 2,500 2,500,000,000
-- 6 1,500 1,500,000,000
-- 7 800 800,000,000
-- 8 500 500,000,000
-- 9 400 400,000,000
-- 10 350 350,000,000
-- 11 300 300,000,000
-- 12 250 250,000,000
-- 13+ 200 200,000,000

constants.DEFAULT_GENESIS_FEES = {
	[1] = constants.ARIOToMARIO(1000000),
	[2] = constants.ARIOToMARIO(200000),
	[3] = constants.ARIOToMARIO(20000),
	[4] = constants.ARIOToMARIO(10000),
	[5] = constants.ARIOToMARIO(2500),
	[6] = constants.ARIOToMARIO(1500),
	[7] = constants.ARIOToMARIO(800),
	[8] = constants.ARIOToMARIO(500),
	[9] = constants.ARIOToMARIO(400),
	[10] = constants.ARIOToMARIO(350),
	[11] = constants.ARIOToMARIO(300),
	[12] = constants.ARIOToMARIO(250),
}
-- every name of 13+ characters (up to the 51-character maximum) costs a flat 200 ARIO
for nameLength = 13, 51 do
	constants.DEFAULT_GENESIS_FEES[nameLength] = constants.ARIOToMARIO(200)
end

--[[
	DEMAND FACTOR:
	The demand factor is used to adjust the fees for ARNS purchases. It is a moving average of the trailing period purchases and revenues.
	The formula to compute how many periods it would take to reset fees is math.ceil(log(demandFactorMin) / log(1 - demandFactorDownAdjustmentRate)) + maxPeriodsAtMinDemandFactor.
	With these values, it would take 46 periods to get to the minimum demand factor of 0.5. Then an additional 7 periods to reset fees to half of the initial fees.
]]
	constants.DEFAULT_DEMAND_FACTOR = {
		currentPeriod = 1, -- one based index of the current period
		trailingPeriodPurchases = { 0, 0, 0, 0, 0, 0, 0 }, -- acts as a ring buffer of trailing period purchase counts
		trailingPeriodRevenues = { 0, 0, 0, 0, 0, 0, 0 }, -- acts as a ring buffer of trailing period revenues
		purchasesThisPeriod = 0,
		revenueThisPeriod = 0,
		currentDemandFactor = 1,
		consecutivePeriodsWithMinDemandFactor = 0,
		fees = constants.DEFAULT_GENESIS_FEES,
	}
	constants.DEFAULT_DEMAND_FACTOR_SETTINGS = {
		periodZeroStartTimestamp = 1740009600000, -- 2025-02-20T00:00:00Z
		movingAvgPeriodCount = 7, -- the number of periods to use for the moving average
		periodLengthMs = constants.daysToMs(1), -- one day in milliseconds
		demandFactorBaseValue = 1, -- the base demand factor value that the demand factor is reset to when fees are reset
		demandFactorMin = 0.5, -- the minimum demand factor allowed, after which maxPeriodsAtMinDemandFactor is applied and fees are reset
		demandFactorUpAdjustmentRate = 0.05, -- (5%) the rate at which the demand factor increases each period, if demand is increasing (1 + this number)
		demandFactorDownAdjustmentRate = 0.015, -- (1.5%) the rate at which the demand factor decreases each period, if demand is decreasing (1 - this number)
		maxPeriodsAtMinDemandFactor = 7, -- 7 consecutive periods with the minimum demand factor before fees are reset
		criteria = "revenue", -- "revenue" or "purchases"
	}

	return constants
end

_G.package.loaded[".src.constants"] = _loaded_mod_src_constants()

-- module: ".src.base64"
local function _loaded_mod_src_base64()
	--[[

	base64 -- v1.5.3 public domain Lua base64 encoder/decoder
	no warranty implied; use at your own risk

	Needs bit32.extract function. If not present it's implemented using BitOp
	or Lua 5.3 native bit operators. For Lua 5.1 fallbacks to pure Lua
	implementation inspired by Rici Lake's post:
	http://ricilake.blogspot.co.uk/2007/10/iterating-bits-in-lua.html

	author: Ilya Kolbin (iskolbin@gmail.com)
	url: github.com/iskolbin/lbase64

	COMPATIBILITY

	Lua 5.1+, LuaJIT

	LICENSE

	See end of file for license information.

	--]]

	local base64 = {}

	-- Resolve a bit-extraction primitive for the running Lua:
	-- bit32.extract (5.2 / 5.3 compat mode), BitOp (LuaJIT), a pure-Lua
	-- fallback for 5.1, or native 5.3+ operators compiled via load().
	local extract = _G.bit32 and _G.bit32.extract
	if not extract then
		if _G.bit then -- LuaJIT
			local shl, shr, band = _G.bit.lshift, _G.bit.rshift, _G.bit.band
			extract = function(v, from, width)
				return band(shr(v, from), shl(1, width) - 1)
			end
		elseif _G._VERSION == "Lua 5.1" then
			extract = function(v, from, width)
				local w = 0
				local flag = 2 ^ from
				for i = 0, width - 1 do
					local flag2 = flag + flag
					if v % flag2 >= flag then
						w = w + 2 ^ i
					end
					flag = flag2
				end
				return w
			end
		else -- Lua 5.3+
			extract = load([[return function( v, from, width )
				return ( v >> from ) & ((1 << width) - 1)
			end]])()
		end
	end

	-- The 62 characters (codes 0-61) shared by every base64 alphabet.
	local STANDARD_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"

	--- Build an encoder table mapping base64 codes 0-64 to character byte values.
	--- @param s62 string|nil character for code 62 (default "+")
	--- @param s63 string|nil character for code 63 (default "/")
	--- @param spad string|nil padding character, stored at index 64 (default "=")
	--- @return table encoder
	function base64.makeencoder(s62, s63, spad)
		local encoder = {}
		for i = 1, #STANDARD_ALPHABET do
			encoder[i - 1] = STANDARD_ALPHABET:byte(i)
		end
		encoder[62] = (s62 or "+"):byte()
		encoder[63] = (s63 or "/"):byte()
		encoder[64] = (spad or "="):byte()
		return encoder
	end

	--- Build a decoder table (the inverse of makeencoder): character byte -> base64 code.
	function base64.makedecoder(s62, s63, spad)
		local decoder = {}
		for b64code, charcode in pairs(base64.makeencoder(s62, s63, spad)) do
			decoder[charcode] = b64code
		end
		return decoder
	end

	local DEFAULT_ENCODER = base64.makeencoder("+", "/", "=")
	local URL_ENCODER = base64.makeencoder("-", "_", "=")
	local DEFAULT_DECODER = base64.makedecoder("+", "/", "=")
	local URL_DECODER = base64.makedecoder("-", "_", "=")

	base64.URL_ENCODER = URL_ENCODER
	-- BUG FIX: the bundled code read `base64.DEFAULT_ENCODER = base64.DEFAULT_ENCODER`,
	-- a self-assignment of an unset field that exported nil. Export the local table,
	-- matching upstream lbase64 and the other three exports here.
	base64.DEFAULT_ENCODER = DEFAULT_ENCODER
	base64.URL_DECODER = URL_DECODER
	base64.DEFAULT_DECODER = DEFAULT_DECODER

	local char, concat = string.char, table.concat

	--- Encode a byte string to base64.
	--- @param str string the raw bytes to encode
	--- @param encoder table|nil encoder table (default DEFAULT_ENCODER)
	--- @param usecaching boolean|nil memoize 3-byte groups (faster on repetitive input)
	--- @return string encoded
	function base64.encode(str, encoder, usecaching)
		encoder = encoder or DEFAULT_ENCODER
		local t, k, n = {}, 1, #str
		local lastn = n % 3 -- bytes left over for the padded final group
		local cache = {}
		for i = 1, n - lastn, 3 do
			local a, b, c = str:byte(i, i + 2)
			local v = a * 0x10000 + b * 0x100 + c
			local s
			if usecaching then
				s = cache[v]
				if not s then
					s = char(
						encoder[extract(v, 18, 6)],
						encoder[extract(v, 12, 6)],
						encoder[extract(v, 6, 6)],
						encoder[extract(v, 0, 6)]
					)
					cache[v] = s
				end
			else
				s = char(
					encoder[extract(v, 18, 6)],
					encoder[extract(v, 12, 6)],
					encoder[extract(v, 6, 6)],
					encoder[extract(v, 0, 6)]
				)
			end
			t[k] = s
			k = k + 1
		end
		-- final group: pad with '=' (encoder[64]) as needed
		if lastn == 2 then
			local a, b = str:byte(n - 1, n)
			local v = a * 0x10000 + b * 0x100
			t[k] = char(encoder[extract(v, 18, 6)], encoder[extract(v, 12, 6)], encoder[extract(v, 6, 6)], encoder[64])
		elseif lastn == 1 then
			local v = str:byte(n) * 0x10000
			t[k] = char(encoder[extract(v, 18, 6)], encoder[extract(v, 12, 6)], encoder[64], encoder[64])
		end
		return concat(t)
	end

	--- Decode a base64 string to raw bytes. Whitespace and characters outside
	--- the decoder's alphabet are stripped; malformed padding is normalized.
	--- @param b64 string the base64 text to decode
	--- @param decoder table|nil decoder table (default DEFAULT_DECODER)
	--- @param usecaching boolean|nil memoize 4-character groups
	--- @return string decoded
	function base64.decode(b64, decoder, usecaching)
		decoder = decoder or DEFAULT_DECODER
		local pattern = "[^%w%+%/%=]"
		if decoder then
			local s62, s63
			for charcode, b64code in pairs(decoder) do
				if b64code == 62 then
					s62 = charcode
				elseif b64code == 63 then
					s63 = charcode
				end
			end
			pattern = ("[^%%w%%%s%%%s%%=]"):format(char(s62), char(s63))
		end

		-- Remove whitespace and invalid characters
		b64 = b64:gsub("[\n\r%s]", ""):gsub(pattern, "")

		-- Handle excessive padding
		while #b64 % 4 ~= 0 do
			if b64:sub(-1) == "=" then
				b64 = b64:sub(1, -2) -- strip one trailing '='
			else
				break -- stop if the last character is not '='
			end
		end

		-- Truncate at a '=' that appears before the end (invalid interior padding)
		local eqPos = b64:find("=")
		if eqPos and eqPos < #b64 then
			b64 = b64:sub(1, eqPos - 1)
		end

		-- Ensure the length is a multiple of 4, re-padding if the truncation above broke it
		local n = #b64
		local padding = b64:sub(-2) == "==" and 2 or b64:sub(-1) == "=" and 1 or 0
		if n % 4 ~= 0 then
			b64 = b64 .. string.rep("=", 4 - (n % 4))
			padding = (4 - (n % 4)) % 4
			n = #b64
		end

		local cache = usecaching and {}
		local t, k = {}, 1

		-- decode all full groups except the last (handled below for padding)
		for i = 1, n - 4, 4 do
			local a, b, c, d = b64:byte(i, i + 3)
			local s
			if a and b and c and d and decoder[a] and decoder[b] and decoder[c] and decoder[d] then
				if usecaching then
					local v0 = a * 0x1000000 + b * 0x10000 + c * 0x100 + d
					s = cache[v0]
					if not s then
						local v = decoder[a] * 0x40000 + decoder[b] * 0x1000 + decoder[c] * 0x40 + decoder[d]
						s = char(extract(v, 16, 8), extract(v, 8, 8), extract(v, 0, 8))
						cache[v0] = s
					end
				else
					local v = decoder[a] * 0x40000 + decoder[b] * 0x1000 + decoder[c] * 0x40 + decoder[d]
					s = char(extract(v, 16, 8), extract(v, 8, 8), extract(v, 0, 8))
				end
				t[k] = s
				k = k + 1
			end
		end

		-- Handle the final block (based on padding)
		if padding > 0 then
			local a, b, c = b64:byte(n - 3, n)
			local v
			if padding == 1 then
				if a == 61 and b == 61 and c == 61 then
					-- Invalid case: final block is entirely padding
					return concat(t)
				elseif decoder[a] and decoder[b] and decoder[c] then
					v = decoder[a] * 0x40000 + decoder[b] * 0x1000 + decoder[c] * 0x40
					t[k] = char(extract(v, 16, 8), extract(v, 8, 8))
				end
			elseif padding == 2 then
				if a == 61 and b == 61 then
					-- Invalid case: final block is entirely padding
					return concat(t)
				elseif decoder[a] and decoder[b] then
					v = decoder[a] * 0x40000 + decoder[b] * 0x1000
					t[k] = char(extract(v, 16, 8))
				end
			end
		else
			local a, b, c, d = b64:byte(n - 3, n)
			if decoder[a] and decoder[b] and decoder[c] and decoder[d] then
				local v = decoder[a] * 0x40000 + decoder[b] * 0x1000 + decoder[c] * 0x40 + decoder[d]
				t[k] = char(extract(v, 16, 8), extract(v, 8, 8), extract(v, 0, 8))
			elseif decoder[a] and decoder[b] and decoder[c] then
				local v = decoder[a] * 0x40000 + decoder[b] * 0x1000 + decoder[c] * 0x40
				t[k] = char(extract(v, 16, 8), extract(v, 8, 8))
			end
		end

		return concat(t)
	end

	return base64

	--[[
------------------------------------------------------------------------------
This software is available under 2 licenses -- choose whichever you prefer.
------------------------------------------------------------------------------
ALTERNATIVE A - MIT License
Copyright (c) 2018 Ilya Kolbin
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------------------------------------------------------------------
ALTERNATIVE B - Public Domain (www.unlicense.org)
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
software, either in source code form or as a compiled binary, for any purpose,
commercial or non-commercial, and by any means.
In jurisdictions that recognize copyright laws, the author or authors of this
software dedicate any and all copyright interest in the software to the public
domain. We make this dedication for the benefit of the public at large and to
the detriment of our heirs and successors. We intend this dedication to be an
overt act of relinquishment in perpetuity of all present and future rights to
this software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------
--]]
end

_G.package.loaded[".src.base64"] = _loaded_mod_src_base64()

-- module: ".src.json"
local function _loaded_mod_src_json()
	--
	-- json.lua
	--
	-- Copyright (c) 2020 rxi
	--
	-- Permission is hereby granted, free of charge, to any person obtaining a copy of
	-- this software and associated documentation files (the "Software"), to deal in
	-- the Software without restriction, including without limitation the rights to
	-- use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
	-- of the Software, and to permit persons to whom the Software is furnished to do
	-- so, subject to the following conditions:
	--
	-- The above copyright notice and this permission notice shall be included in all
	-- copies or substantial portions of the Software.
	--
	-- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
	-- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
	-- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
	-- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
	-- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
	-- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
	-- SOFTWARE.
	--

	local json = { _version = "0.1.2" }

	-------------------------------------------------------------------------------
	-- Encode
	-------------------------------------------------------------------------------

	local encode

	local escape_char_map = {
		["\\"] = "\\",
		['"'] = '"',
		["\b"] = "b",
		["\f"] = "f",
		["\n"] = "n",
		["\r"] = "r",
		["\t"] = "t",
	}

	local escape_char_map_inv = { ["/"] = "/" }
	for k, v in pairs(escape_char_map) do
		escape_char_map_inv[v] = k
	end

	-- Map a character that needs escaping to its JSON escape sequence.
	local function escape_char(c)
		return "\\" ..
(escape_char_map[c] or string.format("u%04x", c:byte()))
	end

	-- nil encodes as the JSON literal null
	local function encode_nil()
		return "null"
	end

	-- Encode a table as a JSON array or object; `stack` is the set of
	-- tables currently being encoded, used to detect circular references.
	local function encode_table(val, stack)
		local res = {}
		stack = stack or {}

		-- Circular reference?
		if stack[val] then
			error("circular reference")
		end

		stack[val] = true

		if rawget(val, 1) ~= nil or next(val) == nil then
			-- Treat as array: keys must be numeric and form a dense 1..n sequence
			local n = 0
			for k in pairs(val) do
				if type(k) ~= "number" then
					error("invalid table: mixed or invalid key types")
				end
				n = n + 1
			end
			if n ~= #val then
				error("invalid table: sparse array")
			end
			for _, v in ipairs(val) do
				table.insert(res, encode(v, stack))
			end
			stack[val] = nil
			return "[" .. table.concat(res, ",") .. "]"
		else
			-- Treat as object: keys must all be strings
			for k, v in pairs(val) do
				if type(k) ~= "string" then
					error("invalid table: mixed or invalid key types")
				end
				table.insert(res, encode(k, stack) .. ":" .. encode(v, stack))
			end
			stack[val] = nil
			return "{" .. table.concat(res, ",") .. "}"
		end
	end

	local function encode_string(val)
		return '"' .. val:gsub('[%z\1-\31\\"]', escape_char) .. '"'
	end

	local function encode_number(val)
		-- Reject NaN, -inf and inf
		if val ~= val or val <= -math.huge or val >= math.huge then
			error("unexpected number value '" .. tostring(val) .. "'")
		end
		if math.type(val) == "integer" then
			-- integers are formatted exactly, avoiding floating-point conversion
			return string.format("%d", val)
		else
			-- 20 significant digits for non-integer numbers
			return string.format("%.20g", val)
		end
	end

	local type_func_map = {
		["nil"] = encode_nil,
		["table"] = encode_table,
		["string"] = encode_string,
		["number"] = encode_number,
		["boolean"] = tostring,
	}

	-- Dispatch on the Lua type of `val`
	encode = function(val, stack)
		local t = type(val)
		local f = type_func_map[t]
		if f then
			return f(val, stack)
		end
		error("unexpected type '" .. t .. "'")
	end

	--- Encode a Lua value as a JSON string.
	function json.encode(val)
		return (encode(val))
	end

	-------------------------------------------------------------------------------
	-- Decode
	-------------------------------------------------------------------------------

	local parse

	-- Build a lookup set from the given values
	local function create_set(...)
		local res = {}
		for i = 1, select("#", ...) do
			res[select(i, ...)] = true
		end
		return res
	end

	local space_chars = create_set(" ", "\t", "\r", "\n")
	local delim_chars = create_set(" ", "\t", "\r", "\n", "]", "}", ",")
	local escape_chars = create_set("\\", "/", '"', "b", "f", "n", "r", "t", "u")
	local literals = create_set("true", "false", "null")

	local literal_map = {
		["true"] = true,
		["false"] = false,
		["null"] = nil,
	}

	-- Return the index of the first character at/after idx that is (or, when
	-- negate is true, is not) in `set`; #str + 1 when none is found.
	local function next_char(str, idx, set, negate)
		for i = idx, #str do
			if set[str:sub(i, i)] ~= negate then
				return i
			end
		end
		return #str + 1
	end

	-- Raise a decode error annotated with the line/column of `idx` in `str`
	local function decode_error(str, idx, msg)
		local line_count = 1
		local col_count = 1
		for i = 1, idx - 1 do
			col_count = col_count + 1
			if str:sub(i, i) == "\n" then
				line_count = line_count + 1
				col_count = 1
			end
		end
		error(string.format("%s at line %d col %d", msg, line_count, col_count))
	end

	-- UTF-8 encode a Unicode codepoint
	-- http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&id=iws-appendixa
	local function codepoint_to_utf8(n)
		local f = math.floor
		if n <= 0x7f then
			return string.char(n)
		elseif n <= 0x7ff then
			return string.char(f(n / 64) + 192, n % 64 + 128)
		elseif n <= 0xffff then
			return string.char(f(n / 4096) + 224, f(n % 4096 / 64) + 128, n % 64 + 128)
		elseif n <= 0x10ffff then
			return string.char(f(n / 262144) + 240, f(n % 262144 / 4096) + 128, f(n % 4096 / 64) + 128, n % 64 + 128)
		end
		error(string.format("invalid unicode codepoint '%x'", n))
	end

	-- Decode a \uXXXX escape (or surrogate pair of two escapes) to UTF-8
	local function parse_unicode_escape(s)
		local n1 = tonumber(s:sub(1, 4), 16)
		local n2 = tonumber(s:sub(7, 10), 16)
		if n2 then
			-- surrogate pair
			return codepoint_to_utf8((n1 - 0xd800) * 0x400 + (n2 - 0xdc00) + 0x10000)
		else
			return codepoint_to_utf8(n1)
		end
	end

	local function parse_string(str, i)
		local res = ""
		local j = i + 1
		local k = j

		while j <= #str do
			local x = str:byte(j)

			if x < 32 then
				decode_error(str, j, "control character in string")
			elseif x == 92 then -- `\`: escape sequence
				res = res .. str:sub(k, j - 1)
				j = j + 1
				local c = str:sub(j, j)
				if c == "u" then
					-- high-surrogate pair form first, then a plain 4-hex escape
					local hex = str:match("^[dD][89aAbB]%x%x\\u%x%x%x%x", j + 1)
						or str:match("^%x%x%x%x", j + 1)
						or decode_error(str, j - 1, "invalid unicode escape in string")
					res = res .. parse_unicode_escape(hex)
					j = j + #hex
				else
					if not escape_chars[c] then
						decode_error(str, j - 1, "invalid escape char '" .. c .. "' in string")
					end
					res = res .. escape_char_map_inv[c]
				end
				k = j + 1
			elseif x == 34 then -- `"`: end of string
				res = res .. str:sub(k, j - 1)
				return res, j + 1
			end

			j = j + 1
		end

		decode_error(str, i, "expected closing quote for string")
	end

	local function parse_number(str, i)
		local x = next_char(str, i, delim_chars)
		local s = str:sub(i, x - 1)
		local n = tonumber(s)
		if not n then
			decode_error(str, i, "invalid number '" .. s .. "'")
		end
		return n, x
	end

	local function parse_literal(str, i)
		local x = next_char(str, i, delim_chars)
		local word = str:sub(i, x - 1)
		if not literals[word] then
			decode_error(str, i, "invalid literal '" .. word .. "'")
		end
		return literal_map[word], x
	end

	local function parse_array(str, i)
		local res = {}
		local n = 1
		i = i + 1
		while 1 do
			local x
			i = next_char(str, i, space_chars, true)
			-- empty / end of array?
			if str:sub(i, i) == "]" then
				i = i + 1
				break
			end
			-- read the next value
			x, i = parse(str, i)
			res[n] = x
			n = n + 1
			-- read the separator or terminator
			i = next_char(str, i, space_chars, true)
			local chr = str:sub(i, i)
			i = i + 1
			if chr == "]" then
				break
			end
			if chr ~= "," then
				decode_error(str, i, "expected ']' or ','")
			end
		end
		return res, i
	end

	local function parse_object(str, i)
		local res = {}
		i = i + 1
		while 1 do
			local key, val
			i = next_char(str, i, space_chars, true)
			-- empty / end of object?
			if str:sub(i, i) == "}" then
				i = i + 1
				break
			end
			-- read the key (must be a string)
			if str:sub(i, i) ~= '"' then
				decode_error(str, i, "expected string for key")
			end
			key, i = parse(str, i)
			-- read the ':' delimiter
			i = next_char(str, i, space_chars, true)
			if str:sub(i, i) ~= ":" then
				decode_error(str, i, "expected ':' after key")
			end
			i = next_char(str, i + 1, space_chars, true)
			-- read the value
			val, i = parse(str, i)
			res[key] = val
			-- read the separator or terminator
			i = next_char(str, i, space_chars, true)
			local chr = str:sub(i, i)
			i = i + 1
			if chr == "}" then
				break
			end
			if chr ~= "," then
				decode_error(str, i, "expected '}' or ','")
			end
		end
		return res, i
	end

	-- Dispatch table keyed on the first character of a JSON value
	local char_func_map = {
		['"'] = parse_string,
		["0"] = parse_number,
		["1"] = parse_number,
		["2"] = parse_number,
		["3"] = parse_number,
		["4"] = parse_number,
		["5"] = parse_number,
		["6"] = parse_number,
		["7"] = parse_number,
		["8"] = parse_number,
		["9"] = parse_number,
		["-"] = parse_number,
		["t"] = parse_literal,
		["f"] = parse_literal,
		["n"] = parse_literal,
		["["] = parse_array,
		["{"] = parse_object,
	}

	parse = function(str, idx)
		local chr = str:sub(idx, idx)
		local f = char_func_map[chr]
		if f then
			return f(str, idx)
		end
		decode_error(str, idx, "unexpected character '" .. chr .. "'")
	end

	--- Decode a JSON string into a Lua value. Errors on trailing garbage.
	function json.decode(str)
		if type(str) ~= "string" then
			error("expected argument of type string, got " ..
type(str)) + end + local res, idx = parse(str, next_char(str, 1, space_chars, true)) + idx = next_char(str, idx, space_chars, true) + if idx <= #str then + decode_error(str, idx, "trailing garbage") + end + return res + end + + return json +end + +_G.package.loaded[".src.json"] = _loaded_mod_src_json() + +-- module: ".src.utils" +local function _loaded_mod_src_utils() + local utils = {} + local base64 = require(".src.base64") + local json = require(".src.json") + local constants = require(".src.constants") + + -- note: crypto is provided by the module so we reference it relative to process.lua in the module + local crypto = require(".crypto.init") + + function utils.hasMatchingTag(tag, value) + return Handlers.utils.hasMatchingTag(tag, value) + end + + --- Checks if a value is an integer + --- @param value any The value to check + --- @return boolean isInteger - whether the value is an integer + function utils.isInteger(value) + if value == nil then + return false + end + if type(value) == "string" then + value = tonumber(value) + end + return type(value) == "number" and value % 1 == 0 + end + + --- Rounds a number to a given precision + --- @param number number The number to round + --- @param precision number The precision to round to + --- @return number roundedNumber - the rounded number to the precision provided + function utils.roundToPrecision(number, precision) + return math.floor(number * (10 ^ precision) + 0.5) / (10 ^ precision) + end + + --- Sums the values of a table + --- @param tbl table The table to sum + --- @return number sum - the sum of the table values + function utils.sumTableValues(tbl) + local sum = 0 + for _, value in pairs(tbl) do + assert(type(value) == "number", "Table values must be numbers. Found: " .. 
type(value))
			sum = sum + value
		end
		return sum
	end

	--- Slices a table
	--- @param tbl table The table to slice
	--- @param first number The first index to slice from
	--- @param last number|nil The last index to slice to
	--- @param step number|nil The step to slice by
	--- @return table slicedTable - the sliced table
	function utils.slice(tbl, first, last, step)
		local sliced = {}
		for index = first or 1, last or #tbl, step or 1 do
			sliced[#sliced + 1] = tbl[index]
		end
		return sliced
	end

	--- @class PaginationTags
	--- @field cursor string|nil The cursor to paginate from
	--- @field limit number The limit of results to return
	--- @field sortBy string|nil The field to sort by
	--- @field sortOrder string The order to sort by
	--- @field filters table|nil Optional filters to apply

	--- Parses the pagination tags from a message
	--- @param msg table The message provided to a handler (see ao docs for more info)
	--- @return PaginationTags paginationTags - the pagination tags
	function utils.parsePaginationTags(msg)
		local cursor = msg.Tags.Cursor
		local limit = tonumber(msg.Tags["Limit"]) or 100
		assert(limit <= 1000, "Limit must be less than or equal to 1000")
		local sortOrder = msg.Tags["Sort-Order"] and string.lower(msg.Tags["Sort-Order"]) or "desc"
		assert(sortOrder == "asc" or sortOrder == "desc", "Invalid sortOrder: expected 'asc' or 'desc'")
		local sortBy = msg.Tags["Sort-By"]
		local filters = utils.safeDecodeJson(msg.Tags.Filters)
		assert(msg.Tags.Filters == nil or filters ~= nil, "Invalid JSON supplied in Filters tag")
		return {
			cursor = cursor,
			limit = limit,
			sortBy = sortBy,
			sortOrder = sortOrder,
			filters = filters,
		}
	end

	--- Sorts a table by multiple fields with specified orders for each field.
	--- Supports tables of non-table values by using `nil` as a field name.
	--- Each field is provided as a table with 'field' (string|nil) and 'order' ("asc" or "desc").
	--- Supports nested fields using dot notation.
	--- @param prevTable table The table to sort
	--- @param fields table A list of fields with order specified, e.g., { { field = "name", order = "asc" } }
	--- @return table sortedTable - the sorted table
	function utils.sortTableByFields(prevTable, fields)
		-- Primitive mode: sort plain values (with possible nils) when no field name is given
		if fields[1].field == nil then
			local nonNilValues = {}
			local nilValuesCount = 0

			-- pairs (not ipairs) so every element is seen, even past a nil hole
			for _, value in pairs(prevTable) do
				if value == nil then
					nilValuesCount = nilValuesCount + 1
				else
					table.insert(nonNilValues, value)
				end
			end

			table.sort(nonNilValues, function(a, b)
				if fields[1].order == "asc" then
					return a < b
				else
					return a > b
				end
			end)

			-- append nil values at the end
			for _ = 1, nilValuesCount do
				table.insert(nonNilValues, nil)
			end

			return nonNilValues
		end

		-- Deep copy so sorting never mutates the caller's table
		local tableCopy = utils.deepCopy(prevTable) or {}

		-- Nothing to sort
		if #tableCopy == 0 or #fields == 0 then
			return tableCopy
		end

		-- Resolve a dot-separated field path (e.g. "a.b.c") against a table
		local function getNestedValue(tbl, fieldPath)
			local current = tbl
			for segment in fieldPath:gmatch("[^.]+") do
				if type(current) == "table" then
					current = current[segment]
				else
					return nil
				end
			end
			return current
		end

		-- Multi-key comparator: first differing field decides; nils sort last
		table.sort(tableCopy, function(a, b)
			for _, fieldSpec in ipairs(fields) do
				local fieldPath = fieldSpec.field
				local order = fieldSpec.order
				local aField, bField

				if fieldPath == nil then
					-- no field name: compare the values themselves
					aField = a
					bField = b
				else
					aField = getNestedValue(a, fieldPath)
					bField = getNestedValue(b, fieldPath)
				end

				if order ~= "asc" and order ~= "desc" then
					error("Invalid sort order. Expected 'asc' or 'desc'")
				end

				if aField == nil and bField ~= nil then
					return false
				elseif aField ~= nil and bField == nil then
					return true
				elseif aField ~= nil and bField ~= nil then
					if aField ~= bField then
						if order == "asc" then
							return aField < bField
						else
							return aField > bField
						end
					end
				end
			end
			-- all fields are equal
			return false
		end)

		return tableCopy
	end

	--- @class PaginatedTable
	--- @field items table The items in the current page
	--- @field limit number The limit of items to return
	--- @field totalItems number The total number of items
	--- @field sortBy string|nil The field to sort by, nil if sorting by the primitive items themselves
	--- @field sortOrder string The order to sort by
	--- @field nextCursor string|number|nil The cursor to the next page
	--- @field hasMore boolean Whether there is a next page

	--- Paginate a table with a cursor
	--- @param tableArray table The table to paginate
	--- @param cursor string|number|nil The cursor to paginate from (optional)
	--- @param cursorField string|nil The field to use as the cursor or nil for lists of primitives
	--- @param limit number The limit of items to return
	--- @param sortBy string|nil The field to sort by. Nil if sorting by the primitive items themselves.
	--- @param sortOrder string The order to sort by ("asc" or "desc")
	--- @param filters table|nil Optional filter table
	--- @return PaginatedTable paginatedTable - the paginated table result
	function utils.paginateTableWithCursor(tableArray, cursor, cursorField, limit, sortBy, sortOrder, filters)
		local filterFn = nil
		if type(filters) == "table" then
			filterFn = utils.createFilterFunction(filters)
		end

		local filteredArray = filterFn
				and utils.filterArray(tableArray, function(_, value)
					return filterFn(value)
				end)
			or tableArray

		assert(sortOrder == "asc" or sortOrder == "desc", "Invalid sortOrder: expected 'asc' or 'desc'")
		local sortFields = { { order = sortOrder, field = sortBy } }
		if cursorField ~= nil and cursorField ~= sortBy then
			-- tie-breaker to guarantee deterministic pagination
			table.insert(sortFields, { order = "asc", field = cursorField })
		end
		local sortedArray = utils.sortTableByFields(filteredArray, sortFields)

		if not sortedArray or #sortedArray == 0 then
			return {
				items = {},
				limit = limit,
				totalItems = 0,
				sortBy = sortBy,
				sortOrder = sortOrder,
				nextCursor = nil,
				hasMore = false,
			}
		end

		-- Resume one past the element the cursor points at
		local startIndex = 1
		if cursor then
			for i, obj in ipairs(sortedArray) do
				if cursorField and obj[cursorField] == cursor or cursor == obj then
					startIndex = i + 1
					break
				end
			end
		end

		local items = {}
		local endIndex = math.min(startIndex + limit - 1, #sortedArray)
		for i = startIndex, endIndex do
			table.insert(items, sortedArray[i])
		end

		local nextCursor = nil
		if endIndex < #sortedArray then
			nextCursor = cursorField and sortedArray[endIndex][cursorField] or sortedArray[endIndex]
		end

		return {
			items = items,
			limit = limit,
			totalItems = #sortedArray,
			sortBy = sortBy,
			sortOrder = sortOrder,
			nextCursor = nextCursor, -- points at the last item in the current page
			hasMore = nextCursor ~= nil,
		}
	end

	--- Checks if an address is a valid Arweave address
	--- @param
address string The address to check + --- @return boolean # whether the address is a valid Arweave address + function utils.isValidArweaveAddress(address) + return type(address) == "string" and #address == 43 and string.match(address, "^[%w-_]+$") ~= nil + end + + --- Checks if an address looks like an unformatted Ethereum address + --- @param address string The address to check + --- @return boolean isValidUnformattedEthAddress - whether the address is a valid unformatted Ethereum address + function utils.isValidUnformattedEthAddress(address) + return type(address) == "string" and #address == 42 and string.match(address, "^0x[%x]+$") ~= nil + end + + --- Checks if an address is a valid Ethereum address and is in EIP-55 checksum format + --- @param address string The address to check + --- @return boolean isValidEthAddress - whether the address is a valid Ethereum address + function utils.isValidEthAddress(address) + return utils.isValidUnformattedEthAddress(address) and address == utils.formatEIP55Address(address) + end + + function utils.isValidUnsafeAddress(address) + if not address then + return false + end + local match = string.match(address, "^[%w_-]+$") + return match ~= nil + and #address >= constants.MIN_UNSAFE_ADDRESS_LENGTH + and #address <= constants.MAX_UNSAFE_ADDRESS_LENGTH + end + + --- Checks if an address is a valid AO address + --- @param address string|nil The address to check + --- @param allowUnsafe boolean|nil Whether to allow unsafe addresses, defaults to false + --- @return boolean # whether the address is valid, depending on the allowUnsafe flag + function utils.isValidAddress(address, allowUnsafe) + allowUnsafe = allowUnsafe or false -- default to false, only allow unsafe addresses if explicitly set + if not address then + return false + end + if allowUnsafe then + return utils.isValidUnsafeAddress(address) + end + return utils.isValidArweaveAddress(address) or utils.isValidEthAddress(address) + end + + --- Converts an address to EIP-55 
checksum format + --- Assumes address has been validated as a valid Ethereum address (see utils.isValidEthAddress) + --- Reference: https://eips.ethereum.org/EIPS/eip-55 + --- @param address string The address to convert + --- @return string formattedAddress - the EIP-55 checksum formatted address + function utils.formatEIP55Address(address) + local hex = string.lower(string.sub(address, 3)) + + local hash = crypto.digest.keccak256(hex) + local hashHex = hash.asHex() + + local checksumAddress = "0x" + + for i = 1, #hashHex do + local hexChar = string.sub(hashHex, i, i) + local hexCharValue = tonumber(hexChar, 16) + local char = string.sub(hex, i, i) + if hexCharValue > 7 then + char = string.upper(char) + end + checksumAddress = checksumAddress .. char + end + + return checksumAddress + end + + --- Formats an address to EIP-55 checksum format if it is a valid Ethereum address + --- @param address string The address to format + --- @return string formattedAddress - the EIP-55 checksum formatted address + function utils.formatAddress(address) + if utils.isValidUnformattedEthAddress(address) then + return utils.formatEIP55Address(address) + end + return address + end + + --- Safely decodes a JSON string + --- @param jsonString string|nil The JSON string to decode + --- @return table|nil decodedJson - the decoded JSON or nil if the string is nil or the decoding fails + function utils.safeDecodeJson(jsonString) + if not jsonString then + return nil + end + local status, result = pcall(json.decode, jsonString) + if not status then + return nil + end + return result + end + + --- Finds an element in an array that matches a predicate + --- @param array table The array to search + --- @param predicate function The predicate to match + --- @return number|nil index - the index of the found element or nil if the element is not found + function utils.findInArray(array, predicate) + for i = 1, #array do + if predicate(array[i]) then + return i -- Return the index of the found 
element + end + end + return nil -- Return nil if the element is not found + end + + --- Deep copies a table with optional exclusion of specified fields, including nested fields + --- Preserves proper sequential ordering of array tables when some of the excluded nested keys are array indexes + --- @generic T: table|nil + --- @param original T The table to copy + --- @param excludedFields table|nil An array of keys or dot-separated key paths to exclude from the deep copy + --- @return T The deep copy of the table or nil if the original is nil + function utils.deepCopy(original, excludedFields) + if not original then + return nil + end + + if type(original) ~= "table" then + return original + end + + -- Fast path: If no excluded fields, copy directly + if not excludedFields or #excludedFields == 0 then + local copy = {} + for key, value in pairs(original) do + if type(value) == "table" then + copy[key] = utils.deepCopy(value) -- Recursive copy for nested tables + else + copy[key] = value + end + end + return copy + end + + -- If excludes are provided, create a lookup table for excluded fields + local excluded = utils.createLookupTable(excludedFields) + + -- Helper function to check if a key path is excluded + local function isExcluded(keyPath) + for excludedKey in pairs(excluded) do + if keyPath == excludedKey or keyPath:match("^" .. excludedKey .. 
"%.") then + return true + end + end + return false + end + + -- Recursive function to deep copy with nested field exclusion + local function deepCopyHelper(orig, path) + if type(orig) ~= "table" then + return orig + end + + local result = {} + local isArray = true + + -- Check if all keys are numeric and sequential + for key in pairs(orig) do + if type(key) ~= "number" or key % 1 ~= 0 then + isArray = false + break + end + end + + if isArray then + -- Collect numeric keys in sorted order for sequential reindexing + local numericKeys = {} + for key in pairs(orig) do + table.insert(numericKeys, key) + end + table.sort(numericKeys) + + local index = 1 + for _, key in ipairs(numericKeys) do + local keyPath = path and (path .. "." .. key) or tostring(key) + if not isExcluded(keyPath) then + result[index] = deepCopyHelper(orig[key], keyPath) -- Sequentially reindex + index = index + 1 + end + end + else + -- Handle non-array tables (dictionaries) + for key, value in pairs(orig) do + local keyPath = path and (path .. "." .. 
key) or key + if not isExcluded(keyPath) then + result[key] = deepCopyHelper(value, keyPath) + end + end + end + + return result + end + + -- Use the exclusion-aware deep copy helper + return deepCopyHelper(original, nil) + end + + --- Gets the length of a table + --- @param table table The table to get the length of + --- @return number length - the length of the table + function utils.lengthOfTable(table) + local count = 0 + for _, val in pairs(table) do + if val then + count = count + 1 + end + end + return count + end + + --- Gets a hash from a base64 URL encoded string + --- @param str string The base64 URL encoded string + --- @return table The hash + function utils.getHashFromBase64URL(str) + local decodedHash = base64.decode(str, base64.URL_DECODER) + local hashStream = crypto.utils.stream.fromString(decodedHash) + return crypto.digest.sha2_256(hashStream).asBytes() + end + + --- Escapes Lua pattern characters in a string + --- @param str string The string to escape + --- @return string # The escaped string + local function escapePattern(str) + return (str:gsub("([%^%$%(%)%%%.%[%]%*%+%-%?])", "%%%1")) + end + + --- Splits a string by a delimiter + --- @param input string The string to split + --- @param delimiter string|nil The delimiter to split by + --- @return table # The split string + function utils.splitString(input, delimiter) + delimiter = delimiter or "," + delimiter = escapePattern(delimiter) + local result = {} + for token in (input or ""):gmatch(string.format("([^%s]+)", delimiter)) do + table.insert(result, token) + end + return result + end + + --- Trims a string + --- @param input string The string to trim + --- @return string The trimmed string + function utils.trimString(input) + return input:match("^%s*(.-)%s*$") + end + + --- Splits a string by a delimiter and trims each token + --- @param input string|nil The string to split + --- @param delimiter string|nil The delimiter to split by, defaults to "," + --- @return table tokens - the 
split and trimmed string + function utils.splitAndTrimString(input, delimiter) + delimiter = escapePattern(delimiter or ",") + if not input then + return {} + end + local tokens = {} + for _, token in ipairs(utils.splitString(input, delimiter)) do + local trimmed = utils.trimString(token) + if #trimmed > 0 then + table.insert(tokens, trimmed) + end + end + return tokens + end + + --- Checks if a timestamp is an integer and converts it to milliseconds if it is in seconds + --- @param timestamp number The timestamp to check and convert + --- @return number timestampInMs - the timestamp in milliseconds + function utils.checkAndConvertTimestampToMs(timestamp) + -- Check if the timestamp is an integer + assert(type(timestamp) == "number", "Timestamp must be a number") + assert(utils.isInteger(timestamp), "Timestamp must be an integer") + + -- Define the plausible range for Unix timestamps in seconds + local min_timestamp = 0 + local max_timestamp = 4102444800 -- Corresponds to 2100-01-01 + + if timestamp >= min_timestamp and timestamp <= max_timestamp then + -- The timestamp is already in seconds, convert it to milliseconds + return timestamp * 1000 + end + + -- If the timestamp is outside the range for seconds, check for milliseconds + local min_timestamp_ms = min_timestamp * 1000 + local max_timestamp_ms = max_timestamp * 1000 + + if timestamp >= min_timestamp_ms and timestamp <= max_timestamp_ms then + return timestamp + end + + error("Timestamp is out of range") + end + + function utils.reduce(tbl, fn, init) + local acc = init + local i = 1 + for k, v in pairs(tbl) do + acc = fn(acc, k, v, i) + i = i + 1 + end + return acc + end + + function utils.map(tbl, fn) + local newTbl = {} + for k, v in pairs(tbl) do + newTbl[k] = fn(k, v) + end + return newTbl + end + + function utils.toTrainCase(str) + -- Replace underscores and spaces with hyphens + str = str:gsub("[_%s]+", "-") + + -- Handle camelCase and PascalCase by adding a hyphen before uppercase letters that follow 
lowercase letters + str = str:gsub("(%l)(%u)", "%1-%2") + + -- Capitalize the first letter of every word (after hyphen) and convert to Train-Case + str = str:gsub("(%a)([%w]*)", function(first, rest) + -- If the word is all uppercase (like "GW"), preserve it + if first:upper() == first and rest:upper() == rest then + return first:upper() .. rest + else + return first:upper() .. rest:lower() + end + end) + return str + end + + function utils.createLookupTable(tbl, valueFn) + local lookupTable = {} + valueFn = valueFn or function() + return true + end + for key, value in pairs(tbl or {}) do + lookupTable[value] = valueFn(key, value) + end + return lookupTable + end + + function utils.getTableKeys(tbl) + local keys = {} + for key, _ in pairs(tbl or {}) do + table.insert(keys, key) + end + return keys + end + + function utils.filterArray(arr, predicate) + local filtered = {} + for i, value in ipairs(arr or {}) do -- ipairs ensures we only traverse numeric keys sequentially + if predicate and predicate(i, value) then + table.insert(filtered, value) -- Insert re-indexes automatically + end + end + return filtered + end + + function utils.filterDictionary(tbl, predicate) + local filtered = {} + for key, value in pairs(tbl or {}) do + if predicate and predicate(key, value) then + filtered[key] = value + end + end + return filtered + end + + --- Creates a predicate function from a table of filters. + --- Each key/value pair in the filter table must be satisfied for an item to match. + --- A filter value can be a table of acceptable values or a single value. 
	--- @param filters table|nil The filters to convert
	--- @return function|nil predicate - the predicate function or nil if no filters
	function utils.createFilterFunction(filters)
		if type(filters) ~= "table" then
			return nil
		end

		-- Precompute lookup maps for array values so repeated checks are O(1)
		local lookups = {}
		for field, value in pairs(filters) do
			if type(value) == "table" then
				lookups[field] = utils.createLookupTable(value)
			else
				lookups[field] = value
			end
		end

		-- Returned predicate: every filter field must match for the item to pass
		return function(item)
			for field, expected in pairs(lookups) do
				local itemValue = type(item) == "table" and item[field] or nil
				if type(expected) == "table" then
					if not expected[itemValue] then
						return false
					end
				else
					if itemValue ~= expected then
						return false
					end
				end
			end
			return true
		end
	end

	--- Sanitizes inputs to ensure they are valid strings
	--- @param table table The table to sanitize (note: parameter shadows the `table` builtin; no table.* calls are made below)
	--- @return table sanitizedTable - the sanitized table
	function utils.validateAndSanitizeInputs(table)
		assert(type(table) == "table", "Table must be a table")
		local sanitizedTable = {}
		for key, value in pairs(table) do
			assert(type(key) == "string", "Key must be a string")
			assert(
				type(value) == "string" or type(value) == "number" or type(value) == "boolean",
				"Value must be a string, integer, or boolean"
			)
			if type(value) == "string" then
				assert(#key > 0, "Key cannot be empty")
				assert(#value > 0, "Value cannot be empty")
				assert(not string.match(key, "^%s+$"), "Key cannot be only whitespace")
				assert(not string.match(value, "^%s+$"), "Value cannot be only whitespace")
			end
			if type(value) == "boolean" then
				assert(value == true or value == false, "Boolean value must be true or false")
			end
			if type(value) == "number" then
				assert(utils.isInteger(value), "Number must be an integer")
			end
			sanitizedTable[key] = value
		end

		local knownAddressTags = {
			"Recipient",
			"Initiator",
			"Target",
			"Source",
			"Address",
			"Vault-Id",
			"Process-Id",
			"Observer-Address",
		}

		for _, tagName in ipairs(knownAddressTags) do
			-- Format all incoming addresses
			sanitizedTable[tagName] = sanitizedTable[tagName] and utils.formatAddress(sanitizedTable[tagName]) or nil
		end

		local knownNumberTags = {
			"Quantity",
			"Lock-Length",
			"Operator-Stake",
			"Delegated-Stake",
			"Withdraw-Stake",
			"Timestamp",
			"Years",
			"Min-Delegated-Stake",
			"Port",
			"Extend-Length",
			"Delegate-Reward-Share-Ratio",
			"Epoch-Index",
			"Price-Interval-Ms",
			"Block-Height",
		}
		for _, tagName in ipairs(knownNumberTags) do
			-- Format all incoming numbers
			sanitizedTable[tagName] = sanitizedTable[tagName] and tonumber(sanitizedTable[tagName]) or nil
		end

		local knownBooleanTags = {
			"Allow-Unsafe-Addresses",
			"Force-Prune",
			"Revokable",
		}
		for _, tagName in ipairs(knownBooleanTags) do
			sanitizedTable[tagName] = sanitizedTable[tagName]
					and utils.booleanOrBooleanStringToBoolean(sanitizedTable[tagName])
				or nil
		end
		return sanitizedTable
	end

	--- Coerces a boolean or the string "true"/"TRUE" (case-insensitive) to a boolean
	--- @param value string|boolean
	--- @return boolean
	function utils.booleanOrBooleanStringToBoolean(value)
		if type(value) == "boolean" then
			return value
		end
		return type(value) == "string" and string.lower(value) == "true"
	end

	--- Returns the base ArNS name (the segment after the last underscore)
	--- @param name string|nil The name to strip
	--- @return string|nil # The base name (or the input when no match)
	function utils.baseNameForName(name)
		return (name or ""):match("[^_]+$") or name
	end

	--- @param name string An ArNS name with or without an undername
	--- @return string|nil # The undername, if present, or nil
	function utils.undernameForName(name)
		if not name:match("_") then
			return nil
		end

		-- Strip the base name suffix by removing its reversed form (plus the
		-- separating "_") from the front of the reversed full name.
		local baseName = utils.baseNameForName(name)
		return string.gsub(name:reverse(), baseName:reverse() .. "_", "", 1):reverse()
	end

	return utils
end

_G.package.loaded[".src.utils"] = _loaded_mod_src_utils()

-- module: ".src.globals"
-- Initializes all process-level global state, idempotently (`X = X or default`),
-- so re-evaluating the bundle never clobbers existing state.
local function _loaded_mod_src_globals()
	local constants = require(".src.constants")
	local utils = require(".src.utils")
	local globals = {}

	--[[
	Constants
]]
	Name = Name or constants.NAME
	Ticker = Ticker or constants.TICKER
	Logo = Logo or constants.LOGO
	Denomination = Denomination or constants.DENOMINATION
	Owner = Owner or ao.env and ao.env.Process and ao.env.Process.Owner or "owner"

	--[[
	Balances
]]
	Balances = Balances or {}
	Balances[ao.id] = Balances[ao.id] or constants.DEFAULT_PROTOCOL_BALANCE
	Balances[Owner] = Balances[Owner] or (constants.TOTAL_TOKEN_SUPPLY - constants.DEFAULT_PROTOCOL_BALANCE)

	--[[
	Token Supply
]]
	--- @type number
	TotalSupply = TotalSupply or constants.TOTAL_TOKEN_SUPPLY

	--[[
	Gateway Registry
]]
	--- @alias Gateways table
	--- @type Gateways
	GatewayRegistry = GatewayRegistry or {}
	--- @type GatewayRegistrySettings
	GatewayRegistrySettings = GatewayRegistrySettings or utils.deepCopy(constants.DEFAULT_GAR_SETTINGS)

	--[[
	Epochs
]]
	--- @alias Epochs table
	--- @type Epochs
	Epochs = Epochs or {}
	--- @type EpochSettings
	EpochSettings = EpochSettings or utils.deepCopy(constants.DEFAULT_EPOCH_SETTINGS)
	--- @type DistributionSettings
	DistributionSettings = DistributionSettings or utils.deepCopy(constants.DEFAULT_DISTRIBUTION_SETTINGS)

	--[[
	NameRegistry
]]
	--- @type NameRegistry
	NameRegistry = NameRegistry
		or {
			reserved = { www = {} }, -- www is reserved by default
			records = {},
			returned = {},
		}

	--[[
	Primary Names
]]
	--- @type PrimaryNames
	PrimaryNames = PrimaryNames or {
		requests = {},
		names = {},
		owners = {},
	}

	--[[
	DemandFactor
]]
	--- @type DemandFactor
	DemandFactor = DemandFactor or utils.deepCopy(constants.DEFAULT_DEMAND_FACTOR)
	--- @type DemandFactorSettings
	DemandFactorSettings = DemandFactorSettings or utils.deepCopy(constants.DEFAULT_DEMAND_FACTOR_SETTINGS)

	--[[
	Vaults
]]
	--- @type Vaults
	Vaults = Vaults or {}

	--[[
	Last Known Variables - primarily used for eventing and pruning
]]
	--- @type Timestamp|nil
	LastKnownMessageTimestamp = LastKnownMessageTimestamp or 0
	--- @type string
	LastKnownMessageId = LastKnownMessageId or ""
	--- @type Timestamp|nil
	LastGracePeriodEntryEndTimestamp = LastGracePeriodEntryEndTimestamp or 0
	--- @type number
	LastCreatedEpochIndex = LastCreatedEpochIndex or -1
	--- @type number
	LastDistributedEpochIndex = LastDistributedEpochIndex or 0
	--- @type number
	LastKnownCirculatingSupply = LastKnownCirculatingSupply or 0 -- total circulating supply (e.g. balances - protocol balance)
	--- @type number
	LastKnownLockedSupply = LastKnownLockedSupply or 0 -- total vault balance across all vaults
	--- @type number
	LastKnownStakedSupply = LastKnownStakedSupply or 0 -- total operator stake across all gateways
	--- @type number
	LastKnownDelegatedSupply = LastKnownDelegatedSupply or 0 -- total delegated stake across all gateways
	--- @type number
	LastKnownWithdrawSupply = LastKnownWithdrawSupply or 0 -- total withdraw supply across all gateways (gateways and delegates)

	--[[
	Pruning Timestamps
]]
	--- @type Timestamp|nil
	NextRecordsPruneTimestamp = NextRecordsPruneTimestamp or 0
	--- @type Timestamp|nil
	NextReturnedNamesPruneTimestamp = NextReturnedNamesPruneTimestamp or 0
	--- @type Timestamp|nil
	NextPrimaryNamesPruneTimestamp = NextPrimaryNamesPruneTimestamp or 0
	--- @type Timestamp|nil
	NextBalanceVaultsPruneTimestamp = NextBalanceVaultsPruneTimestamp or 0
	--- @type Timestamp|nil
	NextGatewayVaultsPruneTimestamp = NextGatewayVaultsPruneTimestamp or 0
	--- @type Timestamp|nil
	NextGatewaysPruneTimestamp = NextGatewaysPruneTimestamp or 0
	--- @type Timestamp|nil
	NextRedelegationsPruneTimestamp = NextRedelegationsPruneTimestamp or 0

	return globals
end

_G.package.loaded[".src.globals"] = _loaded_mod_src_globals()

-- module: ".src.balances"
local function _loaded_mod_src_balances()
	local utils = require(".src.utils")
	local balances = {}

	--- @alias mARIO number

	--- Transfers tokens from one address to another
	---@param recipient string The address to receive tokens
	---@param from string The address sending tokens
	---@param qty number The amount of tokens to transfer (must be integer)
	---@param allowUnsafeAddresses boolean Whether to allow unsafe addresses
	---@return table Updated balances for sender and recipient addresses
	function balances.transfer(recipient, from, qty, allowUnsafeAddresses)
		assert(type(recipient) == "string", "Recipient is required!")
		assert(type(from) == "string", "From is required!")
		assert(from ~= recipient, "Cannot transfer to self")
		assert(utils.isValidAddress(recipient, allowUnsafeAddresses), "Invalid recipient")
		assert(type(qty) == "number", "Quantity is required and must be a number!")
		-- NOTE(review): duplicates the `from ~= recipient` assert above; harmless but redundant
		assert(recipient ~= from, "Cannot transfer to self")
		assert(utils.isInteger(qty), "Quantity must be an integer: " .. qty)
		assert(qty > 0, "Quantity must be greater than 0")

		balances.reduceBalance(from, qty)
		balances.increaseBalance(recipient, qty)

		return {
			[from] = Balances[from],
			[recipient] = Balances[recipient],
		}
	end

	--- Gets the balance for a specific address
	---@param target WalletAddress The address to get balance for
	---@return mARIO The balance amount (0 if address has no balance)
	function balances.getBalance(target)
		return Balances[target] or 0
	end

	--- Gets a deep copy of all balances in the system
	---@return table # All address:balance pairs
	function balances.getBalances()
		return utils.deepCopy(Balances) or {}
	end

	--- Gets all balances in the system (the live table — callers must not mutate)
	---@return table # All address:balance pairs
	function balances.getBalancesUnsafe()
		return Balances or {}
	end

	--- Reduces the balance of an address
	---@param target string The address to reduce balance for
	---@param qty number The amount to reduce by (must be integer)
	---@throws error If target has insufficient balance
	function balances.reduceBalance(target, qty)
		-- Sufficiency is checked before the qty > 0 guard; both must hold
		assert(balances.walletHasSufficientBalance(target, qty), "Insufficient balance")
		assert(qty > 0, "Quantity must be greater than 0")

		local prevBalance = balances.getBalance(target)
		Balances[target] = prevBalance - qty
	end

	--- Increases the balance of an address
	--- @param target string The address to increase balance for
	--- @param qty number The amount to increase by (must be integer)
	function balances.increaseBalance(target, qty)
		assert(utils.isInteger(qty), "Quantity must be an integer: " .. qty)
		local prevBalance = balances.getBalance(target) or 0
		Balances[target] = prevBalance + qty
	end

	--- Gets paginated list of all balances
	--- @param cursor string|nil The address to start from
	--- @param limit number Max number of results to return
	--- @param sortBy string|nil Field to sort by
	--- @param sortOrder string "asc" or "desc" sort direction
	--- @return table Array of {address, balance} objects
	function balances.getPaginatedBalances(cursor, limit, sortBy, sortOrder)
		local allBalances = balances.getBalances()
		local balancesArray = {}
		local cursorField = "address" -- the cursor will be the wallet address
		for address, balance in pairs(allBalances) do
			table.insert(balancesArray, {
				address = address,
				balance = balance,
			})
		end

		return utils.paginateTableWithCursor(balancesArray, cursor, cursorField, limit, sortBy, sortOrder)
	end

	--- Checks if a wallet has a sufficient balance
	--- @param wallet string The address of the wallet
	--- @param quantity number The amount to check against the balance
	--- @return boolean True if the wallet has a sufficient balance, false otherwise
	function balances.walletHasSufficientBalance(wallet, quantity)
		return Balances[wallet] ~= nil and Balances[wallet] >= quantity
	end

	return balances
end

_G.package.loaded[".src.balances"] = _loaded_mod_src_balances()

-- module: ".src.gar"
local function _loaded_mod_src_gar()
	local balances = require(".src.balances")
	local constants = require(".src.constants")
	local utils = require(".src.utils")
	local gar = {}

	--- @class GatewayRegistrySettings
	--- @field observers ObserverSettings
	--- @field operators OperatorSettings
	--- @field delegates DelegateSettings
	--- @field expeditedWithdrawals ExpeditedWithdrawalsSettings

	--- @class CompactGatewaySettings
	--- @field allowDelegatedStaking boolean
	--- @field allowedDelegatesLookup table | nil
	--- @field delegateRewardShareRatio number
	--- @field autoStake boolean
	--- @field minDelegatedStake number
	--- @field label string
	--- @field fqdn string
	--- @field protocol string
	--- @field port number
	--- @field properties string
	--- @field note string | nil

	--- @class CompactGateway
	--- @field operatorStake number
	--- @field totalDelegatedStake number
	--- @field startTimestamp Timestamp
	--- @field endTimestamp Timestamp|nil
	--- @field stats GatewayStats
	--- @field settings CompactGatewaySettings
	--- @field services GatewayServices | nil
	--- @field status "joined"|"leaving"
	--- @field observerAddress WalletAddress
	--- @field weights GatewayWeights
	--- @field slashings table | nil

	--- @class Gateway : CompactGateway
	--- @field vaults table
	--- @field delegates table
	--- @field settings GatewaySettings

	--- @class GatewayStats
	--- @field prescribedEpochCount number
	--- @field observedEpochCount number
	--- @field totalEpochCount number
	--- @field passedEpochCount number
	--- @field failedEpochCount number
	--- @field failedConsecutiveEpochs number
	--- @field passedConsecutiveEpochs number

	--- @class GatewaySettings : CompactGatewaySettings
	--- @field allowedDelegatesLookup table | nil

	--- @class GatewayWeights
	--- @field stakeWeight number
	--- @field tenureWeight number
	--- @field gatewayPerformanceRatio number
	--- @field observerPerformanceRatio number
	--- @field compositeWeight number
	--- @field normalizedCompositeWeight number

	--- @alias GatewayServices table<'bundler', GatewayService>

	--- @class GatewayService
	--- @field fqdn string
	--- @field port number
	--- @field path string
	--- @field protocol string

	--- @alias MessageId string
	--- @alias Timestamp number

	--- @class Delegate
	--- @field delegatedStake number
	--- @field startTimestamp Timestamp
	--- @field vaults table

	--- @class ObserverSettings
	--- @field tenureWeightDays number
	--- @field tenureWeightDurationMs number
	--- @field maxTenureWeight number

	--- @class OperatorSettings
	--- @field minStake number
	--- @field withdrawLengthMs number
	--- @field leaveLengthMs number
	--- @field failedEpochCountMax number
	--- @field failedGatewaySlashRate number
	--- @field maxDelegateRewardSharePct number
	--- @class ExpeditedWithdrawalsSettings
	--- @field minExpeditedWithdrawalPenaltyRate number
	--- @field maxExpeditedWithdrawalPenaltyRate number
	--- @field minExpeditedWithdrawalAmount number

	--- @class DelegateSettings
	--- @field minStake number
	--- @field withdrawLengthMs number

	--- @class JoinGatewaySettings
	--- @field allowDelegatedStaking boolean | nil
	--- @field allowedDelegates WalletAddress[] | nil
	--- @field delegateRewardShareRatio number | nil
	--- @field autoStake boolean | nil
	--- @field minDelegatedStake number
	--- @field label string
	--- @field fqdn string
	--- @field protocol string
	--- @field port number
	--- @field properties string
	--- @field note string | nil

	--- Joins the network with the given parameters
	--- @param from WalletAddress The address from which the request is made
	--- @param stake mARIO: The amount of stake to be used
	--- @param settings JoinGatewaySettings The settings for joining the network
	--- @param services GatewayServices|nil The services to be used in the network
	--- @param observerAddress WalletAddress The address of the observer
	--- @param timeStamp Timestamp The timestamp of the request
	--- @return Gateway # Returns the newly joined gateway
	function gar.joinNetwork(from, stake, settings, services, observerAddress, timeStamp)
		gar.assertValidGatewayParameters(from, stake, settings, services, observerAddress)

		assert(not gar.getGateway(from), "Gateway already exists")
		assert(balances.walletHasSufficientBalance(from, stake), "Insufficient balance")

		local newGateway = {
			operatorStake = stake,
			totalDelegatedStake = 0,
			vaults = {},
			delegates = {},
			startTimestamp = timeStamp,
			stats = {
				prescribedEpochCount = 0,
				observedEpochCount = 0,
				totalEpochCount = 0,
				passedEpochCount = 0,
				failedEpochCount = 0,
				failedConsecutiveEpochs = 0,
				passedConsecutiveEpochs = 0,
			},
			settings = {
				allowDelegatedStaking = settings.allowDelegatedStaking or false,
				allowedDelegatesLookup = settings.allowedDelegates and utils.createLookupTable(
					settings.allowedDelegates
				) or nil,
				delegateRewardShareRatio = settings.delegateRewardShareRatio or 0,
				-- NOTE(review): `x or true` always evaluates to true — an explicit
				-- `autoStake = false` is silently ignored. If false should be
				-- honored, this needs `settings.autoStake ~= false` instead.
				autoStake = settings.autoStake or true,
				minDelegatedStake = settings.minDelegatedStake or gar.getSettings().delegates.minStake,
				label = settings.label,
				fqdn = settings.fqdn,
				protocol = settings.protocol or "https",
				port = settings.port or 443,
				properties = settings.properties,
				note = settings.note or "",
			},
			services = services or nil,
			status = "joined",
			observerAddress = observerAddress or from,
			weights = {
				stakeWeight = 0,
				tenureWeight = 0,
				gatewayPerformanceRatio = 0,
				observerPerformanceRatio = 0,
				compositeWeight = 0,
				normalizedCompositeWeight = 0,
			},
		}

		local gateway = gar.addGateway(from, newGateway)
		balances.reduceBalance(from, stake)
		return gateway
	end

	--- @param from WalletAddress the address of the gateway to exit
	--- @param currentTimestamp Timestamp
	--- @param msgId MessageId
	--- @return Gateway # a copy of the updated gateway
	function gar.leaveNetwork(from, currentTimestamp, msgId)
		local gateway = gar.getGateway(from)

		assert(gateway, "Gateway not found")
		assert(
			gar.isGatewayEligibleToLeave(gateway, currentTimestamp),
			"The gateway is not eligible to leave the network."
)

		local gatewayEndTimestamp = currentTimestamp + gar.getSettings().operators.leaveLengthMs
		-- at most the minimum operator stake is vaulted on the (longer) leave timeline;
		-- any surplus above the minimum is vaulted on the shorter withdraw timeline below
		local minimumStakedTokens = math.min(gar.getSettings().operators.minStake, gateway.operatorStake)
		local gatewayStakeWithdrawTimestamp = currentTimestamp + gar.getSettings().operators.withdrawLengthMs

		-- if the slash happens to be 100% we do not need to vault anything
		if minimumStakedTokens > 0 then
			createGatewayExitVault(gateway, minimumStakedTokens, currentTimestamp, from)

			-- if there is more than the minimum staked tokens, we need to vault the rest but on shorter term
			local remainingStake = gateway.operatorStake - gar.getSettings().operators.minStake

			if remainingStake > 0 then
				createGatewayWithdrawVault(gateway, msgId, remainingStake, currentTimestamp)
				gar.scheduleNextGatewaysPruning(gatewayStakeWithdrawTimestamp)
			end
		end

		-- all operator stake is now held in vaults, so the live stake drops to zero
		gateway.status = "leaving"
		gateway.endTimestamp = gatewayEndTimestamp
		gateway.operatorStake = 0
		gar.scheduleNextGatewaysPruning(gatewayEndTimestamp)

		-- Add tokens from each delegate to a vault that unlocks after the delegate withdrawal period ends
		for address, _ in pairs(gateway.delegates) do
			gar.kickDelegateFromGateway(address, gateway, msgId, currentTimestamp)
		end

		-- update global state
		GatewayRegistry[from] = gateway
		return utils.deepCopy(gateway)
	end

	--- Increases the operator stake for a gateway by moving tokens from the
	--- operator's wallet balance onto the gateway record.
	---@param from string # The address of the gateway to increase stake for
	---@param qty number # The amount of stake to increase by - must be positive integer
	---@return table # The updated gateway object
	function gar.increaseOperatorStake(from, qty)
		assert(type(qty) == "number", "Quantity is required and must be a number")
		assert(qty > 0 and utils.isInteger(qty), "Quantity must be an integer greater than 0")

		local gateway = gar.getGateway(from)
		assert(gateway, "Gateway not found")
		assert(gateway.status ~= "leaving", "Gateway is leaving the network and cannot accept
additional stake.")
		assert(balances.walletHasSufficientBalance(from, qty), "Insufficient balance")

		balances.reduceBalance(from, qty)
		gateway.operatorStake = gateway.operatorStake + qty
		-- update the gateway
		GatewayRegistry[from] = gateway
		return gateway
	end

	-- Utility function to calculate withdrawal details and handle balance adjustments.
	-- The penalty decays linearly from the max rate (at elapsed = 0) to the min rate
	-- (at elapsed = total); the fee is credited to the protocol balance and the
	-- remainder to `from`.
	---@param stake number # The amount of stake to withdraw in mARIO
	---@param elapsedTimeMs number # The amount of time that has elapsed since the withdrawal started
	---@param totalWithdrawalTimeMs number # The total amount of time the withdrawal will take
	---@param from string # The address of the operator or delegate
	---@return number # The expedited withdrawal fee in mARIO, given to the protocol balance
	---@return number # The final amount withdrawn, after the penalty fee is subtracted and moved to the from balance
	---@return number # The penalty rate that was applied, as a percentage
	-- NOTE: the @return order above was corrected to match the actual
	-- `return expeditedWithdrawalFee, amountToWithdraw, finalPenaltyRate` statement;
	-- it previously listed the penalty rate first.
	local function processInstantWithdrawal(stake, elapsedTimeMs, totalWithdrawalTimeMs, from)
		-- Calculate the withdrawal fee and the amount to withdraw
		local maxPenaltyRate = gar.getSettings().expeditedWithdrawals.maxExpeditedWithdrawalPenaltyRate
		local minPenaltyRate = gar.getSettings().expeditedWithdrawals.minExpeditedWithdrawalPenaltyRate

		local finalPenaltyRate
		if totalWithdrawalTimeMs <= 0 then
			-- BUGFIX (robustness): callers that expedite a withdrawal of stake that was
			-- never vaulted invoke this with elapsed = 0 and total = 0. The previous code
			-- computed 0/0 = NaN here and only arrived at the max rate through incidental
			-- NaN handling in math.min/math.max. Apply the maximum penalty explicitly —
			-- same observable result, no NaN arithmetic.
			finalPenaltyRate = utils.roundToPrecision(maxPenaltyRate, 3)
		else
			local penaltyRateDecay = (maxPenaltyRate - minPenaltyRate) * elapsedTimeMs / totalWithdrawalTimeMs
			local penaltyRateAfterDecay = maxPenaltyRate - penaltyRateDecay
			-- the maximum rate they'll pay based on the decay
			local maximumPenaltyRate = math.min(maxPenaltyRate, penaltyRateAfterDecay)
			-- take the maximum rate between the minimum rate and the maximum rate after decay
			local floatingPenaltyRate = math.max(minPenaltyRate, maximumPenaltyRate)
			-- round to three decimal places to avoid floating point precision loss with small numbers
			finalPenaltyRate = utils.roundToPrecision(floatingPenaltyRate, 3)
		end

		-- round down to avoid any floating point precision loss with small numbers
		local expeditedWithdrawalFee = math.floor(stake * finalPenaltyRate)
		local amountToWithdraw = stake - expeditedWithdrawalFee

		-- Withdraw the tokens to the delegate and the protocol balance
		balances.increaseBalance(ao.id, expeditedWithdrawalFee)
		balances.increaseBalance(from, amountToWithdraw)

		return expeditedWithdrawalFee, amountToWithdraw, finalPenaltyRate
	end

	function gar.decreaseOperatorStake(from, qty, currentTimestamp, msgId, instantWithdraw)
		assert(type(qty) == "number", "Quantity is required and must be a number")
		assert(qty > 0, "Quantity must be greater than 0")

		local gateway = gar.getGateway(from)

		assert(gateway, "Gateway not found")
		assert(gateway.status ~= "leaving", "Gateway is leaving the network and cannot withdraw more stake.")

		local maxWithdraw = gateway.operatorStake - gar.getSettings().operators.minStake

		assert(
			qty <= maxWithdraw,
			"Resulting stake of "
				.. gateway.operatorStake - qty
				.. " mARIO is not enough to maintain the minimum operator stake of "
				.. gar.getSettings().operators.minStake
				..
" mARIO" + ) + + gateway.operatorStake = gateway.operatorStake - qty + + local expeditedWithdrawalFee = 0 + local amountToWithdraw = 0 + local penaltyRate = 0 + if instantWithdraw == true then + -- Calculate the penalty and withdraw using the utility function + expeditedWithdrawalFee, amountToWithdraw, penaltyRate = processInstantWithdrawal(qty, 0, 0, from) + else + createGatewayWithdrawVault(gateway, msgId, qty, currentTimestamp) + gar.scheduleNextGatewaysPruning(gateway.vaults[msgId].endTimestamp) + end + + -- Update the gateway + GatewayRegistry[from] = gateway + + return { + gateway = gateway, + penaltyRate = penaltyRate, + expeditedWithdrawalFee = expeditedWithdrawalFee, + amountWithdrawn = amountToWithdraw, + } + end + + --- @class UpdateGatewaySettings : GatewaySettings + --- @field allowDelegatedStaking boolean | nil + --- @field allowedDelegates WalletAddress[] | nil + --- @field delegateRewardShareRatio number | nil + --- @field autoStake boolean | nil + --- @field minDelegatedStake number | nil + --- @field note string | nil + + --- @param from WalletAddress + --- @param updatedSettings UpdateGatewaySettings + --- @param updatedServices GatewayServices|nil + --- @param observerAddress WalletAddress + --- @param currentTimestamp Timestamp + --- @param msgId MessageId + --- @return Gateway # the updated gateway + function gar.updateGatewaySettings(from, updatedSettings, updatedServices, observerAddress, currentTimestamp, msgId) + local gateway = gar.getGateway(from) + assert(gateway, "Gateway not found") + assert(gateway.status ~= "leaving", "Gateway is leaving the network and cannot be updated") + + gar.assertValidGatewayParameters(from, gateway.operatorStake, updatedSettings, updatedServices, observerAddress) + + assert( + not updatedSettings.minDelegatedStake + or updatedSettings.minDelegatedStake >= gar.getSettings().delegates.minStake, + "The minimum delegated stake must be at least " .. gar.getSettings().delegates.minStake .. 
" mARIO" + ) + + for gatewayAddress, existingGateway in pairs(gar.getGatewaysUnsafe()) do + local invalidObserverAddress = existingGateway.observerAddress == observerAddress and gatewayAddress ~= from + assert( + not invalidObserverAddress, + "Invalid observer wallet. The provided observer wallet is correlated with another gateway." + ) + end + + -- update the allow list first if necessary since we may need it for accounting in any subsequent delegate kicks + if updatedSettings.allowDelegatedStaking and updatedSettings.allowedDelegates then + -- Replace the existing lookup table + --- @diagnostic disable-next-line: inject-field + updatedSettings.allowedDelegatesLookup = utils.createLookupTable(updatedSettings.allowedDelegates) + updatedSettings.allowedDelegates = nil -- no longer need the list now that lookup is built + + -- remove any delegates that are not in the allowlist + for delegateAddress, delegate in pairs(gateway.delegates) do + if updatedSettings.allowedDelegatesLookup[delegateAddress] then + if delegate.delegatedStake > 0 then + -- remove the delegate from the lookup since it's adequately tracked as a delegate already + updatedSettings.allowedDelegatesLookup[delegateAddress] = nil + end + elseif delegate.delegatedStake > 0 then + gar.kickDelegateFromGateway(delegateAddress, gateway, msgId, currentTimestamp) + end + -- else: the delegate was exiting already with 0-balance and will no longer be on the allowlist + end + end + + if not updatedSettings.allowDelegatedStaking then + -- Add tokens from each delegate to a vault that unlocks after the delegate withdrawal period ends + if next(gateway.delegates) ~= nil then -- staking disabled and delegates must go + for address, _ in pairs(gateway.delegates) do + gar.kickDelegateFromGateway(address, gateway, msgId, currentTimestamp) + end + end + + -- clear the allowedDelegatesLookup since we no longer need it + --- @diagnostic disable-next-line: inject-field + updatedSettings.allowedDelegatesLookup = nil + end + 
+ -- if allowDelegateStaking is currently false, and you want to set it to true - you have to wait until all the vaults have been returned + assert( + not ( + updatedSettings.allowDelegatedStaking + and not gateway.settings.allowDelegatedStaking + and next(gateway.delegates) + ), + "You cannot enable delegated staking until all delegated stakes have been withdrawn." + ) + + gateway.settings = updatedSettings + if updatedServices then + gateway.services = updatedServices + end + if observerAddress then + gateway.observerAddress = observerAddress + end + -- update the gateway on the global state + GatewayRegistry[from] = gateway + return gateway + end + + --- Gets a copy of a gateway by address + ---@param address WalletAddress The address of the gateway to fetch + ---@return Gateway|nil A gateway object copy or nil if not found + function gar.getGateway(address) + return utils.deepCopy(GatewayRegistry[address]) + end + + --- Gets a copy of a gateway by address, minus its vaults, delegates, and allowlist + ---@param address WalletAddress The address of the gateway to fetch + ---@return CompactGateway|nil A gateway object copy or nil if not found + function gar.getCompactGateway(address) + return utils.deepCopy(GatewayRegistry[address], { "delegates", "vaults", "settings.allowedDelegatesLookup" }) + end + + --- Gets a gateway reference by address, preferably for read-only activities + ---@param address string The address of the gateway to fetch + ---@return Gateway|nil The gateway object or nil if not found + function gar.getGatewayUnsafe(address) + return GatewayRegistry[address] + end + + --- Gets all gateways + ---@return Gateways # address-mapped, deep copies of all the gateways objects + function gar.getGateways() + local gateways = utils.deepCopy(GatewayRegistry) + return gateways or {} + end + + --- @return Gateways # All the address-mapped gateway objects + function gar.getGatewaysUnsafe() + return GatewayRegistry or {} + end + + --- @alias CompactGateways 
table
	--- @return CompactGateways # address-mapped, deep copies of all the gateways objects without delegates, vaults, or allowlist
	function gar.getCompactGateways()
		return utils.reduce(gar.getGatewaysUnsafe(), function(acc, gatewayAddress, gateway)
			acc[gatewayAddress] = utils.deepCopy(gateway, { "delegates", "vaults", "settings.allowedDelegatesLookup" })
			return acc
		end, {})
	end

	--- Builds a fresh delegate record with zero stake and no vaults.
	--- @param startTimestamp number
	function gar.createDelegate(startTimestamp)
		return {
			delegatedStake = 0,
			startTimestamp = startTimestamp,
			vaults = {},
		}
	end

	--- Adds `quantity` to a delegate's stake and the gateway's running total in lock-step.
	--- NOTE(review): declared without `local`, so this helper becomes a global when the
	--- bundled module loader runs — presumably intentional in this build; confirm.
	--- @param delegate Delegate
	--- @param gateway Gateway
	--- @param quantity mARIO
	function increaseDelegateStakeAtGateway(delegate, gateway, quantity)
		assert(delegate, "Delegate is required")
		assert(gateway, "Gateway is required")
		-- zero is allowed as it is a no-op
		assert(
			quantity and utils.isInteger(quantity) and quantity >= 0,
			"Quantity is required and must be an integer greater than or equal to 0: " .. quantity
		)
		delegate.delegatedStake = delegate.delegatedStake + quantity
		gateway.totalDelegatedStake = gateway.totalDelegatedStake + quantity
	end

	--- Subtracts `quantity` from a delegate's stake and the gateway's running total, pruning
	--- the delegate record if fully unwound. Also declared without `local` — see note above.
	--- @param delegateAddress WalletAddress
	--- @param gateway Gateway
	--- @param quantity mARIO
	--- @param ban boolean|nil do not add the delegate back to the gateway allowlist if their delegation is over
	--- @return Delegate, boolean # a copy of the updated delegate and whether or not it was pruned
	function decreaseDelegateStakeAtGateway(delegateAddress, gateway, quantity, ban)
		local delegate = gateway.delegates[delegateAddress]
		assert(delegate, "Delegate is required")
		-- zero is allowed as it is a no-op
		assert(
			quantity and utils.isInteger(quantity) and quantity >= 0,
			"Quantity is required and must be an integer greater than or equal to 0: " ..
quantity + ) + assert(gateway, "Gateway is required") + assert(quantity <= delegate.delegatedStake, "Quantity cannot be greater than the delegate's stake") + assert( + quantity <= gateway.totalDelegatedStake, + "Quantity cannot be greater than the gateway's total delegated stake" + ) + delegate.delegatedStake = delegate.delegatedStake - quantity + gateway.totalDelegatedStake = gateway.totalDelegatedStake - quantity + local pruned = gar.pruneDelegateFromGatewayIfNecessary(delegateAddress, gateway) + if ban and gateway.settings.allowedDelegatesLookup then + gateway.settings.allowedDelegatesLookup[delegateAddress] = nil + end + return utils.deepCopy(delegate), pruned + end + + --- Creates a delegate at a gateway, managing allowlisting accounting if necessary + --- @param startTimestamp number + --- @param gateway Gateway + --- @param delegateAddress WalletAddress + --- @return Delegate # the created delegate + function gar.createDelegateAtGateway(startTimestamp, gateway, delegateAddress) + -- prune user from allow list, if necessary, to save memory + if gateway.settings.allowedDelegatesLookup then + gateway.settings.allowedDelegatesLookup[delegateAddress] = nil + end + local newDelegate = gar.createDelegate(startTimestamp) + gateway.delegates[delegateAddress] = newDelegate + return newDelegate + end + + --- @param balance mARIO # the starting balance of the vault + --- @param startTimestamp number # the timestamp when the vault was created + --- @return Vault # a vault with the specified balance, start timestamp, and computed end timestamp + function gar.createDelegateVault(balance, startTimestamp) + local vault = { + balance = balance, + startTimestamp = startTimestamp, + endTimestamp = startTimestamp + gar.getSettings().delegates.withdrawLengthMs, + } + gar.scheduleNextGatewaysPruning(vault.endTimestamp) + return vault + end + + function gar.delegateStake(from, target, qty, currentTimestamp) + assert(type(qty) == "number", "Quantity is required and must be a 
number") + assert(qty > 0, "Quantity must be greater than 0") + assert(type(target) == "string", "Target is required and must be a string") + assert(type(from) == "string", "From is required and must be a string") + + local gateway = gar.getGateway(target) + assert(gateway, "Gateway not found") + assert( + gateway.status ~= "leaving", + "Gateway is leaving the network and cannot have more stake delegated to it." + ) + + -- don't allow delegating to yourself + assert(from ~= target, "Cannot delegate to your own gateway, use increaseOperatorStake instead.") + assert(balances.walletHasSufficientBalance(from, qty), "Insufficient balance") + assert(gateway.settings.allowDelegatedStaking, "This Gateway does not allow delegated staking.") + assert(gar.delegateAllowedToStake(from, gateway), "This Gateway does not allow this delegate to stake.") + + -- Assuming `gateway` is a table and `fromAddress` is defined + local existingDelegate = gateway.delegates[from] + local minimumStakeForGatewayAndDelegate + -- if it is not an auto stake provided by the protocol, then we need to validate the stake amount meets the gateway's minDelegatedStake + if existingDelegate and existingDelegate.delegatedStake ~= 0 then + -- It already has a stake that is not zero + minimumStakeForGatewayAndDelegate = 1 -- Delegate must provide at least one additional mARIO + else + -- Consider if the operator increases the minimum amount after you've already staked + minimumStakeForGatewayAndDelegate = gateway.settings.minDelegatedStake + end + assert( + qty >= minimumStakeForGatewayAndDelegate, + "Quantity must be greater than the minimum delegated stake amount." 
+ ) + + -- If this delegate has staked before, update its amount, if not, create a new delegated staker + existingDelegate = existingDelegate or gar.createDelegateAtGateway(currentTimestamp, gateway, from) + increaseDelegateStakeAtGateway(existingDelegate, gateway, qty) + + -- Decrement the user's balance + balances.reduceBalance(from, qty) + + -- update the gateway + GatewayRegistry[target] = gateway + return gateway + end + + --- Internal function to increase the stake of an existing delegate. This should only be called from epochs.lua + ---@param gatewayAddress string # The gateway address to increase stake for (required) + ---@param gateway table # The gateway object to increase stake for (required) + ---@param delegateAddress string # The address of the delegate to increase stake for (required) + ---@param qty number # The amount of stake to increase by - must be positive integer (required) + function gar.increaseExistingDelegateStake(gatewayAddress, gateway, delegateAddress, qty) + assert(gateway, "Gateway not found") + assert(delegateAddress, "Delegate address is required") + assert( + qty and utils.isInteger(qty) and qty > 0, + "Quantity is required and must be an integer greater than 0: " .. qty + ) + + local delegate = gateway.delegates[delegateAddress] + assert(delegate, "Delegate not found") + + -- consider case where delegate has been kicked from the gateway and has vaulted stake + assert( + gar.delegateAllowedToStake(delegateAddress, gateway), + "This Gateway does not allow this delegate to stake." 
+ ) + + increaseDelegateStakeAtGateway(gateway.delegates[delegateAddress], gateway, qty) + GatewayRegistry[gatewayAddress] = gateway + return gateway + end + + ---@return GatewayRegistrySettings # a deep copy of the gateway registry settings + function gar.getSettings() + return utils.deepCopy(GatewayRegistrySettings) + end + + --- @class DecreaseDelegateStakeReturn + --- @field gatewayTotalDelegatedStake mARIO The updated amount of total delegated stake at the gateway + --- @field updatedDelegate Delegate The updated delegate object + --- @field delegatePruned boolean Whether or not the delegate was pruned from the gateway + --- @field penaltyRate number The penalty rate for the expedited withdrawal, if applicable + --- @field expeditedWithdrawalFee number The fee deducted from the stake for the expedited withdrawal, if applicable + --- @field amountWithdrawn number The amount of stake withdrawn after any penalty fee is deducted + + --- @param gatewayAddress WalletAddress The address of the gateway from which to decrease delegated stake + --- @param delegator WalletAddress The address of the delegator for which to decrease delegated stake + --- @param qty mARIO The amount of delegated stake to decrease + --- @param currentTimestamp Timestamp The current timestamp + --- @param messageId MessageId The message ID of the current action + --- @param instantWithdraw boolean Whether to withdraw the stake instantly; otherwise allow it to be vaulted + --- @return DecreaseDelegateStakeReturn # Details about the outcome of the operation + function gar.decreaseDelegateStake(gatewayAddress, delegator, qty, currentTimestamp, messageId, instantWithdraw) + assert(type(qty) == "number", "Quantity is required and must be a number") + assert(qty > 0, "Quantity must be greater than 0") + + local gateway = gar.getGateway(gatewayAddress) + + assert(gateway, "Gateway not found") + assert(gateway.status ~= "leaving", "Gateway is leaving the network and cannot withdraw more stake.") + + 
local delegate = gateway.delegates[delegator] + assert(delegate, "This delegate is not staked at this gateway.") + + local existingStake = delegate.delegatedStake + local requiredMinimumStake = gateway.settings.minDelegatedStake + local maxAllowedToWithdraw = existingStake - requiredMinimumStake + assert( + maxAllowedToWithdraw >= qty or qty == existingStake, + "Remaining delegated stake must be greater than the minimum delegated stake. Adjust the amount or withdraw all stake." + ) + + -- Instant withdrawal logic with penalty + local expeditedWithdrawalFee = 0 + local amountToWithdraw = 0 + local penaltyRate = 0 + + if instantWithdraw == true then + -- Calculate the penalty and withdraw using the utility function and move the balances + expeditedWithdrawalFee, amountToWithdraw, penaltyRate = processInstantWithdrawal(qty, 0, 0, delegator) + else + createDelegateWithdrawVault(gateway, delegator, messageId, qty, currentTimestamp) + end + local updatedDelegate, pruned = decreaseDelegateStakeAtGateway(delegator, gateway, qty) + + -- update the gateway + GatewayRegistry[gatewayAddress] = gateway + return { + gatewayTotalDelegatedStake = gateway.totalDelegatedStake, + updatedDelegate = updatedDelegate, + delegatePruned = pruned, + penaltyRate = penaltyRate, + expeditedWithdrawalFee = expeditedWithdrawalFee, + amountWithdrawn = amountToWithdraw, + } + end + + function gar.isGatewayLeaving(gateway) + return gateway.status == "leaving" + end + + function gar.isGatewayEligibleToLeave(gateway, timestamp) + assert(gateway, "Gateway not found") + local isJoined = gar.isGatewayJoined(gateway, timestamp) + return isJoined + end + + function gar.isGatewayActiveBeforeTimestamp(startTimestamp, gateway) + local didStartBeforeEpoch = gateway.startTimestamp <= startTimestamp + local isNotLeaving = not gar.isGatewayLeaving(gateway) + return didStartBeforeEpoch and isNotLeaving + end + + --- Returns the addresses of the gateways that are active before a given timestamp + --- @param 
startTimestamp number The timestamp to check if the gateways are active before + --- @return WalletAddress[] # The addresses of the active gateways + function gar.getActiveGatewayAddressesBeforeTimestamp(startTimestamp) + local activeGatewayAddresses = {} + -- use pairs as gateways is a map + for address, gateway in pairs(gar.getGatewaysUnsafe()) do + if gar.isGatewayActiveBeforeTimestamp(startTimestamp, gateway) then + table.insert(activeGatewayAddresses, address) + end + end + return activeGatewayAddresses + end + + --- Gets the weights of collection of gateways at a given timestamp + --- @param gatewayAddresses string[] The gateway addresses to get the weights for + --- @param timestamp number The timestamp to get the weights at + --- @return WeightedGateway[] # The weighted gateways + function gar.getGatewayWeightsAtTimestamp(gatewayAddresses, timestamp) + local weightedObservers = {} + local totalCompositeWeight = 0 + + -- Iterate over gateways to calculate weights + for _, gatewayAddress in pairs(gatewayAddresses) do + -- okay to use unsafe here as we are not modifying the gateway, just computing the weights + local gateway = gar.getGatewayUnsafe(gatewayAddress) + if gateway then + local totalStake = gateway.operatorStake + gateway.totalDelegatedStake -- 100 - no cap to this + local stakeWeightRatio = totalStake / gar.getSettings().operators.minStake -- this is always greater than 1 as the minOperatorStake is always less than the stake + -- the percentage of the epoch the gateway was joined for before this epoch, if the gateway starts in the future this will be 0 + local gatewayStartTimestamp = gateway.startTimestamp + local totalTimeForGateway = timestamp >= gatewayStartTimestamp and (timestamp - gatewayStartTimestamp) + or -1 + local calculatedTenureWeightForGateway = totalTimeForGateway < 0 and 0 + or ( + totalTimeForGateway > 0 + and totalTimeForGateway / gar.getSettings().observers.tenureWeightDurationMs + or 1 / 
gar.getSettings().observers.tenureWeightDurationMs + ) + local gatewayTenureWeight = + math.min(calculatedTenureWeightForGateway, gar.getSettings().observers.maxTenureWeight) + + local totalEpochsGatewayPassed = gateway.stats.passedEpochCount or 0 + local totalEpochsParticipatedIn = gateway.stats.totalEpochCount or 0 + local gatewayPerformanceRatio = (1 + totalEpochsGatewayPassed) / (1 + totalEpochsParticipatedIn) + local totalEpochsPrescribed = gateway.stats.prescribedEpochCount or 0 + local totalEpochsSubmitted = gateway.stats.observedEpochCount or 0 + local observerPerformanceRatio = (1 + totalEpochsSubmitted) / (1 + totalEpochsPrescribed) + + local compositeWeight = stakeWeightRatio + * gatewayTenureWeight + * gatewayPerformanceRatio + * observerPerformanceRatio + + table.insert(weightedObservers, { + gatewayAddress = gatewayAddress, + observerAddress = gateway.observerAddress, + stake = totalStake, + startTimestamp = gateway.startTimestamp, + stakeWeight = stakeWeightRatio, + tenureWeight = gatewayTenureWeight, + gatewayPerformanceRatio = gatewayPerformanceRatio, + observerPerformanceRatio = observerPerformanceRatio, + compositeWeight = compositeWeight, + normalizedCompositeWeight = nil, -- set later once we have the total composite weight + }) + + totalCompositeWeight = totalCompositeWeight + compositeWeight + end + end + + -- Calculate the normalized composite weight for each observer + for _, weightedObserver in pairs(weightedObservers) do + if totalCompositeWeight > 0 then + weightedObserver.normalizedCompositeWeight = weightedObserver.compositeWeight / totalCompositeWeight + else + weightedObserver.normalizedCompositeWeight = 0 + end + end + return weightedObservers + end + + function gar.isGatewayJoined(gateway, currentTimestamp) + return gateway.status == "joined" and gateway.startTimestamp <= currentTimestamp + end + + function gar.assertValidGatewayParameters(from, stake, settings, services, observerAddress) + assert(type(from) == "string", "from is 
required and must be a string")
		assert(type(stake) == "number", "stake is required and must be a number")
		assert(type(settings) == "table", "settings is required and must be a table")
		-- BUGFIX: error message previously read "must be a a valid arweave address"
		assert(
			type(observerAddress) == "string" and utils.isValidAddress(observerAddress, true),
			"Observer-Address is required and must be a valid arweave address"
		)
		if settings.allowDelegatedStaking ~= nil then
			assert(type(settings.allowDelegatedStaking) == "boolean", "allowDelegatedStaking must be a boolean")
		end
		if type(settings.allowedDelegates) == "table" then
			for _, delegate in pairs(settings.allowedDelegates) do
				assert(utils.isValidAddress(delegate, true), "delegates in allowedDelegates must be valid AO addresses")
			end
		else
			assert(
				settings.allowedDelegates == nil,
				"allowedDelegates must be a table parsed from a comma-separated string or nil"
			)
		end

		assert(type(settings.label) == "string", "label is required and must be a string")
		assert(type(settings.fqdn) == "string", "fqdn is required and must be a string")
		if settings.protocol ~= nil then
			assert(
				type(settings.protocol) == "string" and settings.protocol == "https",
				"protocol is required and must be https"
			)
		end
		if settings.port ~= nil then
			assert(
				type(settings.port) == "number"
					and utils.isInteger(settings.port)
					and settings.port >= 0
					and settings.port <= 65535,
				"port is required and must be an integer between 0 and 65535"
			)
		end
		assert(
			type(settings.properties) == "string" and utils.isValidAddress(settings.properties, true),
			"properties is required and must be a string"
		)
		assert(
			stake >= gar.getSettings().operators.minStake,
			"Operator stake must be greater than the minimum stake to join the network"
		)
		if settings.delegateRewardShareRatio ~= nil then
			assert(
				type(settings.delegateRewardShareRatio) == "number"
					and utils.isInteger(settings.delegateRewardShareRatio)
					and settings.delegateRewardShareRatio >= 0
					and settings.delegateRewardShareRatio <= gar.getSettings().operators.maxDelegateRewardSharePct,
				"delegateRewardShareRatio must be an integer between 0 and "
					.. gar.getSettings().operators.maxDelegateRewardSharePct
			)
		end
		if settings.autoStake ~= nil then
			assert(type(settings.autoStake) == "boolean", "autoStake must be a boolean")
		end
		if settings.properties ~= nil then
			-- NOTE: redundant with the mandatory properties assert above; kept for safety.
			-- BUGFIX: message previously said "properties must be a table" while the
			-- condition checks for a string.
			assert(type(settings.properties) == "string", "properties must be a string")
		end
		if settings.minDelegatedStake ~= nil then
			assert(
				type(settings.minDelegatedStake) == "number"
					and utils.isInteger(settings.minDelegatedStake)
					and settings.minDelegatedStake >= gar.getSettings().delegates.minStake,
				"minDelegatedStake must be an integer greater than or equal to the minimum delegated stake"
			)
		end

		if services ~= nil then
			assert(type(services) == "table", "services must be a table")

			local allowedServiceKeys = { bundlers = true }
			for key, _ in pairs(services) do
				assert(allowedServiceKeys[key], "services contains an invalid key: " .. tostring(key))
			end

			if services.bundlers ~= nil then
				assert(type(services.bundlers) == "table", "services.bundlers must be a table")

				assert(utils.lengthOfTable(services.bundlers) <= 20, "No more than 20 bundlers allowed")

				for _, bundler in ipairs(services.bundlers) do
					local allowedBundlerKeys = { fqdn = true, port = true, protocol = true, path = true }
					for key, _ in pairs(bundler) do
						assert(allowedBundlerKeys[key], "bundler contains an invalid key: " .. tostring(key))
					end
					assert(type(bundler.fqdn) == "string", "bundler.fqdn is required and must be a string")
					assert(
						type(bundler.port) == "number"
							and utils.isInteger(bundler.port)
							and bundler.port >= 0
							and bundler.port <= 65535,
						"bundler.port must be an integer between 0 and 65535"
					)
					assert(
						type(bundler.protocol) == "string" and bundler.protocol == "https",
						"bundler.protocol is required and must be 'https'"
					)
					assert(type(bundler.path) == "string", "bundler.path is required and must be a string")
				end
			end
		end
	end

	--- Updates the stats for a gateway
	---@param address string # The address of the gateway to update stats for
	---@param gateway table # The gateway object to update stats for
	---@param stats table # The stats to update the gateway with
	function gar.updateGatewayStats(address, gateway, stats)
		assert(gateway, "Gateway not found")
		-- these truthiness asserts are safe for 0 counts: 0 is truthy in Lua
		assert(stats.prescribedEpochCount, "prescribedEpochCount is required")
		assert(stats.observedEpochCount, "observedEpochCount is required")
		assert(stats.totalEpochCount, "totalEpochCount is required")
		assert(stats.passedEpochCount, "passedEpochCount is required")
		assert(stats.failedEpochCount, "failedEpochCount is required")
		assert(stats.failedConsecutiveEpochs, "failedConsecutiveEpochs is required")
		assert(stats.passedConsecutiveEpochs, "passedConsecutiveEpochs is required")

		gateway.stats = stats
		GatewayRegistry[address] = gateway

		-- Schedule pruning if necessary
		if stats.failedConsecutiveEpochs >= gar.getSettings().operators.failedEpochCountMax then
			gar.scheduleNextGatewaysPruning(0)
		end

		return gateway
	end

	--- Updates the weights for a gateway
	--- @param weightedGateway WeightedGateway The weighted gateway to update the weights for
	function gar.updateGatewayWeights(weightedGateway)
		local address = weightedGateway.gatewayAddress
		-- by using the unsafe getGateway we avoid the need to update the GatewayRegistry global variable
		local
-- NOTE(review): the opening of the enclosing function lies before this chunk;
-- these lines validate the prescribed weights and copy them onto the gateway record.
gateway = gar.getGatewayUnsafe(address)
	assert(gateway, "Gateway not found")
	assert(weightedGateway.stakeWeight, "stakeWeight is required")
	assert(weightedGateway.tenureWeight, "tenureWeight is required")
	assert(weightedGateway.gatewayPerformanceRatio, "gatewayPerformanceRatio is required")
	assert(weightedGateway.observerPerformanceRatio, "observerPerformanceRatio is required")
	assert(weightedGateway.compositeWeight, "compositeWeight is required")
	assert(weightedGateway.normalizedCompositeWeight, "normalizedCompositeWeight is required")

	gateway.weights = {
		stakeWeight = weightedGateway.stakeWeight,
		tenureWeight = weightedGateway.tenureWeight,
		gatewayPerformanceRatio = weightedGateway.gatewayPerformanceRatio,
		observerPerformanceRatio = weightedGateway.observerPerformanceRatio,
		compositeWeight = weightedGateway.compositeWeight,
		normalizedCompositeWeight = weightedGateway.normalizedCompositeWeight,
	}
end

--- Registers (or replaces) a gateway record in the global GatewayRegistry.
--- @param address string The wallet address to register the gateway under
--- @param gateway table The gateway record to store
--- @return table # The stored gateway record
function gar.addGateway(address, gateway)
	GatewayRegistry[address] = gateway
	return gateway
end

--- @class PruneGatewaysResult
--- @field prunedGateways Gateway[] The pruned gateways
--- @field slashedGateways table The slashed gateways and their amounts
--- @field gatewayStakeReturned number The gateway stake returned
--- @field delegateStakeReturned number The delegate stake returned
--- @field gatewayStakeWithdrawing number The gateway stake withdrawing
--- @field delegateStakeWithdrawing number The delegate stake withdrawing
--- @field stakeSlashed number The stake slashed
--- @field gatewayObjectTallies GatewayObjectTallies|nil Statistics on the gateway system

--- Prunes gateways that have failed more than 30 consecutive epochs
--- Also unlocks any expired operator/delegate withdraw vaults, removes empty
--- delegates, and tallies gateway-system statistics as a side effect.
--- @param currentTimestamp number The current timestamp
--- @param msgId string The message ID
--- @return PruneGatewaysResult # The result containing the pruned gateways, slashed gateways, and other stats
function gar.pruneGateways(currentTimestamp, msgId)
	--- @type PruneGatewaysResult
	local result = {
		prunedGateways = {},
		slashedGateways = {},
		gatewayStakeReturned = 0,
		delegateStakeReturned = 0,
		gatewayStakeWithdrawing = 0,
		delegateStakeWithdrawing = 0,
		stakeSlashed = 0,
	}
	if not NextGatewaysPruneTimestamp or currentTimestamp < NextGatewaysPruneTimestamp then
		-- No known pruning work to do
		return result
	end

	--- @type GatewayObjectTallies
	local gatewayObjectTallies = {
		numDelegates = 0,
		numDelegations = 0,
		numExitingDelegations = 0,
		numDelegateVaults = 0,
		numDelegatesVaulting = 0,
		numGatewayVaults = 0,
		numGatewaysVaulting = 0,
		numGateways = 0,
		numExitingGateways = 0,
	}

	-- we take a deep copy so we can operate directly on the gateway objects
	local gateways = gar.getGateways()
	local garSettings = gar.getSettings()

	if next(gateways) == nil then
		-- No pruning work to do going forward until next gateway joins
		NextGatewaysPruneTimestamp = nil
		return result
	end

	-- reset the next prune timestamp, below will populate it with the next prune timestamp minimum
	NextGatewaysPruneTimestamp = nil

	-- tracks delegator addresses already counted so numDelegates counts unique wallets
	local uniqueDelegators = {}
	for gatewayAddress, gateway in pairs(gateways) do
		if gateway then
			gatewayObjectTallies.numGateways = gatewayObjectTallies.numGateways + 1
			-- first, return any expired vaults regardless of the gateway status
			for vaultId, vault in pairs(gateway.vaults) do
				if vault.endTimestamp <= currentTimestamp then
					unlockGatewayWithdrawVault(gateway, gatewayAddress, vaultId)

					result.gatewayStakeReturned = result.gatewayStakeReturned + vault.balance
				else
					-- find the next prune timestamp
					gar.scheduleNextGatewaysPruning(vault.endTimestamp)
					gatewayObjectTallies.numGatewayVaults = gatewayObjectTallies.numGatewayVaults + 1
				end
			end
			if next(gateway.vaults) ~= nil then
				gatewayObjectTallies.numGatewaysVaulting = gatewayObjectTallies.numGatewaysVaulting + 1
			end
			-- return any delegated vaults and return the stake to the delegate
			for delegateAddress, delegate in pairs(gateway.delegates) do
				for vaultId, vault in pairs(delegate.vaults) do
					if vault.endTimestamp <= currentTimestamp then
						unlockGatewayDelegateVault(gateway, delegateAddress, vaultId)
						result.delegateStakeReturned = result.delegateStakeReturned + vault.balance
					else
						-- find the next prune timestamp
						gar.scheduleNextGatewaysPruning(vault.endTimestamp)
						gatewayObjectTallies.numDelegateVaults = gatewayObjectTallies.numDelegateVaults + 1
					end
				end
				if next(delegate.vaults) ~= nil then
					gatewayObjectTallies.numDelegatesVaulting = gatewayObjectTallies.numDelegatesVaulting + 1
				end

				-- remove the delegate if all vaults are empty and the delegated stake is 0
				if delegate.delegatedStake == 0 and next(delegate.vaults) == nil then
					-- any allowlist reassignment would have already taken place by now
					gateway.delegates[delegateAddress] = nil
				elseif delegate.delegatedStake > 0 then
					gatewayObjectTallies.numDelegations = gatewayObjectTallies.numDelegations + 1
					if not uniqueDelegators[delegateAddress] then
						uniqueDelegators[delegateAddress] = true
						gatewayObjectTallies.numDelegates = gatewayObjectTallies.numDelegates + 1
					end
				else
					-- stake is 0 but vaults remain: the delegation is exiting
					gatewayObjectTallies.numExitingDelegations = gatewayObjectTallies.numExitingDelegations + 1
				end
			end

			-- update the gateway before we do anything else
			GatewayRegistry[gatewayAddress] = gateway

			-- if gateway is joined but failed more than 30 consecutive epochs, mark it as leaving and put operator stake and delegate stakes in vaults
			if
				gateway.status == "joined"
				and garSettings ~= nil
				and gateway.stats.failedConsecutiveEpochs >= garSettings.operators.failedEpochCountMax
			then
				-- slash the minimum operator stake and return it to the protocol balance; mark the gateway as leaving which will vault remaining stake
				local slashableOperatorStake = math.min(gateway.operatorStake, garSettings.operators.minStake)
				local slashAmount =
					math.floor(slashableOperatorStake * garSettings.operators.failedGatewaySlashRate)
				result.delegateStakeWithdrawing = result.delegateStakeWithdrawing + gateway.totalDelegatedStake
				result.gatewayStakeWithdrawing = result.gatewayStakeWithdrawing
					+ (gateway.operatorStake - slashAmount)
				gar.slashOperatorStake(gatewayAddress, slashAmount, currentTimestamp)
				gar.leaveNetwork(gatewayAddress, currentTimestamp, msgId)
				result.slashedGateways[gatewayAddress] = slashAmount
				result.stakeSlashed = result.stakeSlashed + slashAmount
				gatewayObjectTallies.numGateways = gatewayObjectTallies.numGateways - 1
				gatewayObjectTallies.numExitingGateways = gatewayObjectTallies.numExitingGateways + 1
			else
				if gateway.status == "leaving" then
					gatewayObjectTallies.numGateways = gatewayObjectTallies.numGateways - 1
					gatewayObjectTallies.numExitingGateways = gatewayObjectTallies.numExitingGateways + 1
					if gateway.endTimestamp ~= nil then
						if gateway.endTimestamp <= currentTimestamp then
							gatewayObjectTallies.numExitingGateways = gatewayObjectTallies.numExitingGateways - 1
							-- prune the gateway
							GatewayRegistry[gatewayAddress] = nil
							table.insert(result.prunedGateways, gatewayAddress)
						else
							-- find the next prune timestamp
							gar.scheduleNextGatewaysPruning(gateway.endTimestamp)
						end
					end
				end
			end
		end
	end

	result.gatewayObjectTallies = gatewayObjectTallies

	return result
end

--- Slashes an operator's stake and credits the slashed amount to the protocol balance.
--- @param address string The gateway (operator) address to slash
--- @param slashAmount number Positive integer amount of stake to slash
--- @param currentTimestamp number The current timestamp, used to key the slashing record
function gar.slashOperatorStake(address, slashAmount, currentTimestamp)
	assert(utils.isInteger(slashAmount), "Slash amount must be an integer")
	assert(slashAmount > 0, "Slash amount must be greater than 0")

	local gateway = gar.getGateway(address)
	assert(gateway, "Gateway not found")
	local garSettings = gar.getSettings()
	assert(garSettings, "Gateway Registry settings do not exist")

	gateway.operatorStake = gateway.operatorStake - slashAmount
	gateway.slashings = gateway.slashings or {}
	-- NOTE(review): keying by tostring(timestamp) means a second slash at the same
	-- timestamp overwrites the first record — confirm that is acceptable upstream
	gateway.slashings[tostring(currentTimestamp)] = slashAmount
	-- slashed stake goes to the protocol balance (ao.id is the process itself)
	balances.increaseBalance(ao.id, slashAmount)
	GatewayRegistry[address] = gateway
end

---@param cursor string|nil # The cursor gateway address after which to fetch more gateways (optional)
---@param limit number # The max number of gateways to fetch
---@param sortBy string # The gateway field to sort by. Default is "gatewayAddress" (which is added each time)
---@param sortOrder string # The order to sort by, either "asc" or "desc"
---@return table # A table containing the paginated gateways and pagination metadata
function gar.getPaginatedGateways(cursor, limit, sortBy, sortOrder)
	-- gar.getGateways() returns copies, so stripping fields below does not mutate state
	local gateways = gar.getGateways()
	local gatewaysArray = {}
	local cursorField = "gatewayAddress" -- the cursor will be the gateway address
	for address, gateway in pairs(gateways) do
		--- @diagnostic disable-next-line: inject-field
		gateway.gatewayAddress = address
		-- remove delegates and vaults to avoid sending unbounded arrays, they can be fetched via getPaginatedDelegates and getPaginatedVaults
		gateway.delegates = nil
		gateway.vaults = nil
		table.insert(gatewaysArray, gateway)
	end

	return utils.paginateTableWithCursor(gatewaysArray, cursor, cursorField, limit, sortBy, sortOrder)
end

---@param address string # The address of the gateway
---@param cursor string|nil # The cursor delegate address after which to fetch more delegates (optional)
---@param limit number # The max number of delegates to fetch
---@param sortBy string # The delegate field to sort by. Default is "address" (which is added each)
---@param sortOrder string # The order to sort by, either "asc" or "desc"
---@return table # A table containing the paginated delegates and pagination metadata
function gar.getPaginatedDelegates(address, cursor, limit, sortBy, sortOrder)
	local gateway = gar.getGateway(address)
	assert(gateway, "Gateway not found")
	local delegatesArray = {}
	local cursorField = "address"
	for delegateAddress, delegate in pairs(gateway.delegates) do
		--- @diagnostic disable-next-line: inject-field
		delegate.address = delegateAddress
		delegate.vaults = nil -- remove vaults to avoid sending an unbounded array, we can fetch them if needed via getPaginatedDelegations
		table.insert(delegatesArray, delegate)
	end

	return utils.paginateTableWithCursor(delegatesArray, cursor, cursorField, limit, sortBy, sortOrder)
end

--- Returns all allowed delegates if allowlisting is in use. Empty table otherwise.
---@param address string # The address of the gateway
---@param cursor string|nil # The cursor delegate address after which to fetch more delegates (optional)
---@param limit number # The max number of delegates to fetch
---@param sortOrder string # The order to sort by, either "asc" or "desc"
---@return table # A table containing the paginated allowed delegates and pagination metadata
function gar.getPaginatedAllowedDelegates(address, cursor, limit, sortOrder)
	local gateway = gar.getGateway(address)
	assert(gateway, "Gateway not found")
	local allowedDelegatesArray = {}

	if gateway.settings.allowedDelegatesLookup then
		-- addresses explicitly allowlisted
		for delegateAddress, _ in pairs(gateway.settings.allowedDelegatesLookup) do
			table.insert(allowedDelegatesArray, delegateAddress)
		end
		-- addresses with active stake are implicitly allowed as well
		for delegateAddress, delegate in pairs(gateway.delegates) do
			if delegate.delegatedStake > 0 then
				table.insert(allowedDelegatesArray, delegateAddress)
			end
		end
	end

	-- plain array of addresses: no cursor field or sort field applies
	local cursorField = nil
	local sortBy = nil
	return utils.paginateTableWithCursor(allowedDelegatesArray, cursor, cursorField, limit, sortBy, sortOrder)
end

--- Cancels a pending withdrawal vault (operator or delegate) and restores its stake.
--- @param from string # The operator (== gatewayAddress) or delegate address cancelling
--- @param gatewayAddress string # The address of the gateway holding the vault
--- @param vaultId string # The id of the vault to cancel
--- @return table # previous/current stake totals, the restored vault balance, and the gateway
function gar.cancelGatewayWithdrawal(from, gatewayAddress, vaultId)
	local gateway = gar.getGateway(gatewayAddress)
	assert(gateway, "Gateway not found")

	assert(gateway.status ~= "leaving", "Gateway is leaving the network and cannot cancel withdrawals.")

	local existingVault, delegate
	local isGatewayWithdrawal = from == gatewayAddress
	-- if the from matches the gateway address, we are cancelling the operator withdrawal
	if isGatewayWithdrawal then
		existingVault = gateway.vaults[vaultId]
	else
		delegate = gateway.delegates[from]
		assert(delegate, "Delegate not found")
		existingVault = delegate.vaults[vaultId]
	end

	assert(existingVault, "Vault not found for " .. from .. " on " .. gatewayAddress)

	-- confirm the gateway still allow staking
	assert(isGatewayWithdrawal or gateway.settings.allowDelegatedStaking, "Gateway does not allow staking")

	local previousOperatorStake = gateway.operatorStake
	local previousTotalDelegatedStake = gateway.totalDelegatedStake
	local vaultBalance = existingVault.balance
	if isGatewayWithdrawal then
		cancelGatewayWithdrawVault(gateway, vaultId)
	else
		cancelGatewayDelegateVault(gateway, from, vaultId)
	end
	GatewayRegistry[gatewayAddress] = gateway
	return {
		previousOperatorStake = previousOperatorStake,
		previousTotalDelegatedStake = previousTotalDelegatedStake,
		totalOperatorStake = gateway.operatorStake,
		totalDelegatedStake = gateway.totalDelegatedStake,
		vaultBalance = vaultBalance,
		gateway = gateway,
	}
end

---@param from string # The address of the operator or delegate
---@param gatewayAddress string # The address of the gateway
---@param vaultId string # The id of the vault
---@param currentTimestamp number # The current timestamp
---@return table # A table containing the gateway, elapsed time, remaining time, penalty rate, expedited withdrawal fee, and amount withdrawn
function gar.instantGatewayWithdrawal(from, gatewayAddress, vaultId, currentTimestamp)
	local gateway = gar.getGateway(gatewayAddress)
	assert(gateway, "Gateway not found")

	local isGatewayWithdrawal = from == gatewayAddress

	-- the protected operator vault is the vault that represents the gateway operators minimum stake and cannot be instantly withdrawn
	local isGatewayProtectedVault = vaultId == gatewayAddress

	local vault
	local delegate
	if isGatewayWithdrawal then
		assert(gateway.vaults[vaultId], "Vault not found")
		assert(not isGatewayProtectedVault, "Gateway operator minimum stake vault cannot be instantly withdrawn.")
		vault = gateway.vaults[vaultId]
	else
		delegate = gateway.delegates[from]
		assert(delegate, "Delegate not found")
		assert(delegate.vaults[vaultId], "Vault not found")
		vault = delegate.vaults[vaultId]
	end

	---@type number
	local elapsedTime = currentTimestamp - vault.startTimestamp
	---@type number
	local totalWithdrawalTime = vault.endTimestamp - vault.startTimestamp

	-- Ensure the elapsed time is not negative
	assert(elapsedTime >= 0, "Invalid elapsed time")

	-- Process the instant withdrawal
	local expeditedWithdrawalFee, amountToWithdraw, penaltyRate =
		processInstantWithdrawal(vault.balance, elapsedTime, totalWithdrawalTime, from)

	-- Remove the vault after withdrawal
	if isGatewayWithdrawal then
		gateway.vaults[vaultId] = nil
	else
		assert(delegate, "Delegate not found")
		delegate.vaults[vaultId] = nil
		-- Remove the delegate if no stake is left
		if delegate.delegatedStake == 0 and next(delegate.vaults) == nil then
			gar.pruneDelegateFromGatewayIfNecessary(from, gateway)
		end
	end

	-- Update the gateway
	GatewayRegistry[gatewayAddress] = gateway
	return {
		gateway = gateway,
		elapsedTime = elapsedTime,
		remainingTime = totalWithdrawalTime - elapsedTime,
		penaltyRate = penaltyRate,
		expeditedWithdrawalFee = expeditedWithdrawalFee,
		amountWithdrawn =
			amountToWithdraw,
	}
end

--- Preserves delegate's position in allow list upon removal from gateway
--- @param delegateAddress string The address of the delegator
--- @param gateway table The gateway from which the delegate is being removed
--- @return boolean # Whether or not the delegate was pruned
function gar.pruneDelegateFromGatewayIfNecessary(delegateAddress, gateway)
	local pruned = false
	-- NOTE(review): assumes the delegate record exists on this gateway; callers
	-- in this file only invoke it after looking the delegate up — confirm
	local delegate = gateway.delegates[delegateAddress]
	if delegate.delegatedStake == 0 and utils.lengthOfTable(delegate.vaults) == 0 then
		gateway.delegates[delegateAddress] = nil
		pruned = true

		-- replace the delegate in the allowedDelegatesLookup table if necessary
		if gateway.settings.allowedDelegatesLookup then
			gateway.settings.allowedDelegatesLookup[delegateAddress] = true
		end
	end
	return pruned
end

--- Add delegate addresses to the allowedDelegatesLookup table in the gateway's settings
--- @param delegateAddresses table The list of delegate addresses to add
--- @param gatewayAddress string The address of the gateway
--- @return table result Result table containing updated gateway object and the delegates that were actually added
function gar.allowDelegates(delegateAddresses, gatewayAddress)
	local gateway = gar.getGateway(gatewayAddress)
	assert(gateway, "Gateway not found")

	-- Only allow modification of the allow list when allowDelegatedStaking is set to false or a current allow list is in place
	assert(
		not gateway.settings.allowDelegatedStaking or gateway.settings.allowedDelegatesLookup,
		"Allow listing only possible when allowDelegatedStaking is set to 'allowlist'"
	)

	assert(gateway.settings.allowedDelegatesLookup, "allowedDelegatesLookup should not be nil")

	local addedDelegates = {}
	for _, delegateAddress in ipairs(delegateAddresses) do
		assert(utils.isValidAddress(delegateAddress, true), "Invalid delegate address: " .. delegateAddress)
		-- Skip over delegates that are already in the allow list or that have a stake balance
		if not gar.delegateAllowedToStake(delegateAddress, gateway) then
			gateway.settings.allowedDelegatesLookup[delegateAddress] = true
			table.insert(addedDelegates, delegateAddress)
		end
	end

	GatewayRegistry[gatewayAddress] = gateway
	return {
		gateway = gateway,
		addedDelegates = addedDelegates,
	}
end

--- Whether a gateway operator qualifies for the ArNS purchase discount,
--- based on tenure and performance weight thresholds from constants.
--- @param from string The gateway (operator) address to check
--- @return boolean # true when the operator meets both eligibility thresholds
function gar.isEligibleForArNSDiscount(from)
	local gateway = gar.getGatewayUnsafe(from)
	if gateway == nil or gateway.weights == nil or gar.isGatewayLeaving(gateway) then
		return false
	end

	local tenureWeight = gateway.weights.tenureWeight or 0
	local gatewayPerformanceRatio = gateway.weights.gatewayPerformanceRatio or 0

	return tenureWeight >= constants.GATEWAY_OPERATOR_ARNS_DISCOUNT_TENURE_WEIGHT_ELIGIBILITY_THRESHOLD
		and gatewayPerformanceRatio
			>= constants.GATEWAY_OPERATOR_ARNS_DISCOUNT_PERFORMANCE_RATIO_ELIGIBILITY_THRESHOLD
end

--- Remove delegate addresses from the allowedDelegatesLookup table in the gateway's settings
--- @param delegates WalletAddress[] The list of delegate addresses to remove
--- @param gatewayAddress WalletAddress The address of the gateway
--- @param msgId MessageId The associated message ID
--- @param currentTimestamp Timestamp The current timestamp
--- @return table result Result table containing updated gateway object and the delegates that were actually removed
function gar.disallowDelegates(delegates, gatewayAddress, msgId, currentTimestamp)
	local gateway = gar.getGateway(gatewayAddress)
	assert(gateway, "Gateway not found")

	-- Only allow modification of the allow list when allowDelegatedStaking is set to false or a current allow list is in place
	assert(
		not gateway.settings.allowDelegatedStaking or gateway.settings.allowedDelegatesLookup,
		"Allow listing only possible when allowDelegatedStaking is set to 'allowlist'"
	)

	assert(gateway.settings.allowedDelegatesLookup, "allowedDelegatesLookup should not be nil")

	local removedDelegates = {}
	for _, delegateToDisallow in ipairs(delegates) do
		assert(utils.isValidAddress(delegateToDisallow, true), "Invalid delegate address: " .. delegateToDisallow)

		-- Skip over delegates that are not in the allow list
		if gateway.settings.allowedDelegatesLookup[delegateToDisallow] then
			gateway.settings.allowedDelegatesLookup[delegateToDisallow] = nil
			table.insert(removedDelegates, delegateToDisallow)
		end
		-- Kick the delegate off the gateway if necessary
		-- NOTE(review): this kick runs for every requested address, even those not
		-- found in the allow list above — kickDelegateFromGateway no-ops when the
		-- delegate record is absent, but confirm kicking un-listed-but-staked
		-- delegates is intended
		local ban = true
		gar.kickDelegateFromGateway(delegateToDisallow, gateway, msgId, currentTimestamp, ban)
	end

	GatewayRegistry[gatewayAddress] = gateway
	return {
		gateway = gateway,
		removedDelegates = removedDelegates,
	}
end

--- Vaults delegate's tokens and updates delegate and gateway staking balances
--- @param delegateAddress string The address of the delegator
--- @param gateway Gateway The gateway from which to kick the delegate
--- @param msgId MessageId The currently message ID
--- @param currentTimestamp number The current timestamp
--- @param ban boolean|nil Prevents adding the delegate back to the allowlist
function gar.kickDelegateFromGateway(delegateAddress, gateway, msgId, currentTimestamp, ban)
	local delegate = gateway.delegates[delegateAddress]
	if not delegate then
		-- nothing to kick; silently no-op
		return
	end

	if not delegate.vaults then
		delegate.vaults = {}
	end

	local remainingStake = delegate.delegatedStake
	if remainingStake > 0 then
		createDelegateWithdrawVault(gateway, delegateAddress, msgId, remainingStake, currentTimestamp)
	end
	decreaseDelegateStakeAtGateway(delegateAddress, gateway, remainingStake, ban)
end

--- Whether the gateway is using an allowlist for delegated staking.
--- @param gateway table The gateway to inspect
--- @return boolean # true when an allowedDelegatesLookup table is present
function gar.delegationAllowlistedOnGateway(gateway)
	return gateway.settings.allowedDelegatesLookup ~= nil
end

--- Whether a delegate may stake on the gateway (allowlisted or already staked).
--- @param delegateAddress string The delegate address to check
--- @param gateway table The gateway to check against
function gar.delegateAllowedToStake(delegateAddress, gateway)
	if not
gar.delegationAllowlistedOnGateway(gateway) then
		-- no allowlist in use: anyone may stake
		return true
	end
	-- Delegate must either be in the allow list or have a balance greater than 0
	return gateway.settings.allowedDelegatesLookup[delegateAddress]
		or (gateway.delegates[delegateAddress] and gateway.delegates[delegateAddress].delegatedStake or 0) > 0
end

--- @alias VaultId string
--- @alias GatewayAddress WalletAddress

--- @class StakeSpendingPlan
--- @field delegatedStake number
--- @field vaults table

--- @class FundingPlan
--- @field address WalletAddress
--- @field balance number
--- @field stakes table
--- @field shortfall number

--- Builds a plan describing how to fund `quantity` from the address's liquid
--- balance, withdraw vaults, excess stakes, and finally minimum stakes.
--- Planning only — no balances or stakes are modified here (see applyFundingPlan).
--- @param address WalletAddress the funder of the funding plan
--- @param quantity number the amount the funding plan aims to satisfy
--- @param sourcesPreference "any"|"balance"|"stakes" the allowed funding sources
--- @return FundingPlan
function gar.getFundingPlan(address, quantity, sourcesPreference)
	sourcesPreference = sourcesPreference or "balance"
	local fundingPlan = {
		address = address,
		balance = 0,
		stakes = {},
		shortfall = quantity,
	}

	planBalanceDrawdown(fundingPlan, sourcesPreference)

	-- early return if possible. Otherwise we'll move on to using withdraw vaults
	if fundingPlan.shortfall == 0 or sourcesPreference == "balance" then
		return fundingPlan
	end

	local stakingProfile = planVaultsDrawdown(fundingPlan)

	-- early return if possible. Otherwise we'll move on to use excess stakes
	if fundingPlan.shortfall == 0 then
		return fundingPlan
	end

	planExcessStakesDrawdown(fundingPlan, stakingProfile)

	-- early return if possible. Otherwise we'll move on to using minimum stakes
	if fundingPlan.shortfall == 0 then
		return fundingPlan
	end

	planMinimumStakesDrawdown(fundingPlan, stakingProfile)

	return fundingPlan
end

--- Applies the liquid-balance portion of the plan (planning only, no mutation of Balances).
--- NOTE(review): declared global (no `local`) — presumably intentional in this
--- bundled module since it is referenced above its definition; verify.
function planBalanceDrawdown(fundingPlan, sourcesPreference)
	local availableBalance = balances.getBalance(fundingPlan.address)
	if sourcesPreference == "balance" or sourcesPreference == "any" then
		fundingPlan.balance = math.min(availableBalance, fundingPlan.shortfall)
		fundingPlan.shortfall = fundingPlan.shortfall - fundingPlan.balance
	end
end

--- Builds a sorted profile of every gateway where `address` has a delegation,
--- with deep-copied delegate records annotated with excessStake/gatewayAddress.
--- Sort order draws from largest excess stake and weakest gateways first.
--- NOTE(review): declared global (no `local`) — see note on planBalanceDrawdown.
function getStakingProfile(address)
	return utils.sortTableByFields(
		utils.reduce(
			-- only consider gateways that have the address as a delegate
			utils.filterDictionary(gar.getGatewaysUnsafe(), function(_, gateway)
				return gateway.delegates[address] ~= nil
			end),
			-- extract only the essential gateway fields, copying tables so we don't mutate references
			function(acc, gatewayAddress, gateway)
				local totalEpochsGatewayPassed = gateway.stats.passedEpochCount or 0
				local totalEpochsParticipatedIn = gateway.stats.totalEpochCount or 0
				local gatewayPerformanceRatio = (1 + totalEpochsGatewayPassed) / (1 + totalEpochsParticipatedIn)
				local delegate = utils.deepCopy(gateway.delegates[address])
				delegate.excessStake = math.max(0, delegate.delegatedStake - gateway.settings.minDelegatedStake)
				delegate.gatewayAddress = gatewayAddress
				table.insert(acc, {
					totalDelegatedStake = gateway.totalDelegatedStake, -- for comparing gw total stake
					gatewayPerformanceRatio = gatewayPerformanceRatio, -- for comparing gw performance
					delegate = delegate,
					startTimestamp = gateway.startTimestamp, -- for comparing gw tenure
				})
				return acc
			end,
			{}
		),
		{
			{
				order = "desc",
				field = "delegate.excessStake",
			},
			{
				order = "asc",
				field = "gatewayPerformanceRatio",
			},
			{
				order = "desc",
				field = "totalDelegatedStake",
			},
			{
				order = "desc",
				field = "startTimestamp",
			},
		}
	)
end
--- Simulates drawing down the address's withdraw vaults (earliest-ending first)
--- against the plan's shortfall. Mutates fundingPlan.stakes / shortfall only;
--- real vault balances are untouched until applyFundingPlan.
--- NOTE(review): declared global (no `local`) — presumably intentional in this
--- bundled module; verify.
--- @param fundingPlan table The funding plan being built
--- @return table # the staking profile (sorted gateway/delegate info) for later stages
function planVaultsDrawdown(fundingPlan)
	-- find all the address's delegations across the gateways
	local stakingProfile = getStakingProfile(fundingPlan.address)

	-- simulate drawing down vaults until the remaining balance is satisfied OR vaults are exhausted
	local vaults = utils.sortTableByFields(
		-- flatten the vaults across all gateways so we can sort them together
		utils.reduce(stakingProfile, function(acc, _, gatewayInfo)
			for vaultId, vault in pairs(gatewayInfo.delegate.vaults) do
				table.insert(acc, {
					vaultId = vaultId,
					gatewayAddress = gatewayInfo.delegate.gatewayAddress,
					endTimestamp = vault.endTimestamp,
					balance = vault.balance,
				})
			end
			return acc
		end, {}),
		{
			{
				order = "asc",
				field = "endTimestamp",
			},
		}
	)

	for _, vault in pairs(vaults) do
		if fundingPlan.shortfall == 0 then
			break
		end
		local balance = vault.balance
		local balanceToDraw = math.min(balance, fundingPlan.shortfall)
		local gatewayAddress = vault.gatewayAddress
		if balanceToDraw > 0 then
			if not fundingPlan["stakes"][gatewayAddress] then
				fundingPlan["stakes"][gatewayAddress] = {
					delegatedStake = 0,
					vaults = {},
				}
			end
			fundingPlan["stakes"][gatewayAddress].vaults[vault.vaultId] = balanceToDraw
			fundingPlan.shortfall = fundingPlan.shortfall - balanceToDraw
			vault.balance = balance - balanceToDraw
		end
	end

	return stakingProfile
end

--- Simulates drawing down stakes above each gateway's minimum (excess stake),
--- in the profile's order (largest excess, weakest gateway first).
--- Keeps the profile's bookkeeping consistent for the next planning stage.
--- NOTE(review): declared global (no `local`) — see note on planVaultsDrawdown.
--- @param fundingPlan table The funding plan being built
--- @param stakingProfile table The profile returned by planVaultsDrawdown
--- @return table # the (mutated) staking profile
function planExcessStakesDrawdown(fundingPlan, stakingProfile)
	-- simulate drawing down excess stakes until the remaining balance is satisfied OR excess stakes are exhausted
	for _, gatewayInfo in pairs(stakingProfile) do
		if fundingPlan.shortfall == 0 then
			break
		end
		local excessStake = gatewayInfo.delegate.excessStake
		local stakeToDraw = math.min(excessStake, fundingPlan.shortfall)
		if stakeToDraw > 0 then
			if not fundingPlan["stakes"][gatewayInfo.delegate.gatewayAddress] then
				fundingPlan["stakes"][gatewayInfo.delegate.gatewayAddress] = {
					delegatedStake = 0,
					vaults = {},
				}
			end
			fundingPlan["stakes"][gatewayInfo.delegate.gatewayAddress].delegatedStake = stakeToDraw
			fundingPlan.shortfall = fundingPlan.shortfall - stakeToDraw
			gatewayInfo.delegate.delegatedStake = gatewayInfo.delegate.delegatedStake - stakeToDraw
			-- maintain consistency for future re-sorting of the gatewayInfos based on theoretical updated state
			gatewayInfo.delegate.excessStake = excessStake - stakeToDraw
			gatewayInfo.totalDelegatedStake = gatewayInfo.totalDelegatedStake - stakeToDraw
		end
	end
	return stakingProfile
end

--- Last planning stage: draws down remaining (minimum) stakes, weakest and
--- largest gateways first, after re-sorting the profile.
--- NOTE(review): declared global (no `local`) — see note on planVaultsDrawdown.
--- @param fundingPlan table The funding plan being built
--- @param stakingProfile table The profile as mutated by planExcessStakesDrawdown
function planMinimumStakesDrawdown(fundingPlan, stakingProfile)
	-- re-sort the gateways since their totalDelegatedStakes may have changed
	stakingProfile = utils.sortTableByFields(stakingProfile, {
		{
			order = "asc",
			field = "gatewayPerformanceRatio",
		},
		{
			order = "desc",
			field = "totalDelegatedStake",
		},
		{
			order = "desc",
			field = "startTimestamp",
		},
	})

	for _, gatewayInfo in pairs(stakingProfile) do
		if fundingPlan.shortfall == 0 then
			break
		end

		local stakeToDraw = math.min(gatewayInfo.delegate.delegatedStake, fundingPlan.shortfall)
		if stakeToDraw > 0 then
			if not fundingPlan["stakes"][gatewayInfo.delegate.gatewayAddress] then
				fundingPlan["stakes"][gatewayInfo.delegate.gatewayAddress] = {
					delegatedStake = 0,
					vaults = {},
				}
			end
			fundingPlan["stakes"][gatewayInfo.delegate.gatewayAddress].delegatedStake = fundingPlan["stakes"][gatewayInfo.delegate.gatewayAddress].delegatedStake
				+ stakeToDraw
			fundingPlan.shortfall = fundingPlan.shortfall - stakeToDraw
			-- not needed after this, but keep track
			gatewayInfo.delegate.delegatedStake = gatewayInfo.delegate.delegatedStake - stakeToDraw
			gatewayInfo.totalDelegatedStake = gatewayInfo.totalDelegatedStake - stakeToDraw
		end
	end
end

--- Reduces all balances and creates withdraw stakes as prescribed by the funding plan
--- @param fundingPlan table The funding plan to apply
--- @param msgId string The current message ID
--- @param currentTimestamp number The current timestamp
--- @return table # appliedPlan with totalFunded and any newWithdrawVaults created
function gar.applyFundingPlan(fundingPlan, msgId, currentTimestamp)
	local appliedPlan = {
		totalFunded = 0,
		newWithdrawVaults = {},
	}

	-- draw down balance first
	if fundingPlan.balance > 0 then
		balances.reduceBalance(fundingPlan.address, fundingPlan.balance)
		appliedPlan.totalFunded = appliedPlan.totalFunded + fundingPlan.balance
	end

	--draw down stakes and vaults, creating withdraw vaults if necessary
	for gatewayAddress, delegationPlan in pairs(fundingPlan.stakes) do
		local gateway = gar.getGateway(gatewayAddress)
		assert(gateway, "Gateway not found")
		local delegate = gateway.delegates[fundingPlan.address]
		assert(delegate, "Delegate not found")

		-- draw down the vaults first so that allowlisting logic will work correctly when drawing down balances
		delegate.vaults = utils.reduce(delegate.vaults, function(acc, vaultId, vault)
			if delegationPlan.vaults[vaultId] then
				-- if the whole vault is used, "prune" it by moving on
				if vault.balance ~= delegationPlan.vaults[vaultId] then
					acc[vaultId] = {
						balance = vault.balance - delegationPlan.vaults[vaultId],
						startTimestamp = vault.startTimestamp,
						endTimestamp = vault.endTimestamp,
					}
					gar.scheduleNextGatewaysPruning(vault.endTimestamp)
					assert(acc[vaultId].balance > 0, "Vault balance should be greater than 0")
				end
				appliedPlan.totalFunded = appliedPlan.totalFunded + delegationPlan.vaults[vaultId]
			else
				-- nothing to change
				acc[vaultId] = vault
			end
			return acc
		end, {})

		-- draw down the delegated stake balance
		assert(delegate.delegatedStake - delegationPlan.delegatedStake >= 0, "Delegated stake cannot be negative")
		assert(
			gateway.totalDelegatedStake - delegationPlan.delegatedStake >= 0,
			"Total delegated stake cannot be negative"
		)
		decreaseDelegateStakeAtGateway(fundingPlan.address, gateway, delegationPlan.delegatedStake)
		appliedPlan.totalFunded = appliedPlan.totalFunded + delegationPlan.delegatedStake

		-- create an exit vault for the remaining stake if less than the gateway's minimum
		if delegate.delegatedStake > 0 and delegate.delegatedStake < gateway.settings.minDelegatedStake then
			createDelegateWithdrawVault(
				gateway,
				fundingPlan.address,
				msgId,
				delegate.delegatedStake,
				currentTimestamp
			)
			decreaseDelegateStakeAtGateway(fundingPlan.address, gateway, delegate.delegatedStake)
			-- the withdraw vault created above is keyed by msgId — presumably
			-- createDelegateWithdrawVault guarantees that; verify
			appliedPlan.newWithdrawVaults[gatewayAddress] = {
				[msgId] = utils.deepCopy(delegate.vaults[msgId]),
			}
		end

		-- update the gateway
		GatewayRegistry[gatewayAddress] = gateway
	end

	return appliedPlan
end

--- Fetch copies of all the delegations present across all gateways for the given address
--- @param address string The address of the delegator
--- @return table # a table, indexed by gateway address, of all the address's delegations, including nested vaults
function gar.getDelegations(address)
	return utils.reduce(gar.getGatewaysUnsafe(), function(acc, gatewayAddress, gateway)
		if gateway.delegates[address] then
			acc[gatewayAddress] = utils.deepCopy(gateway.delegates[address])
		end
		return acc
	end, {})
end

--- If delegate is missing or stake is 0, then not eligible for distributions
--- @param gateway table The gateway to check
--- @param delegateAddress string The address of the delegate to check
--- @return boolean isEligible - whether the delegate is eligible for distributions
function gar.isDelegateEligibleForDistributions(gateway, delegateAddress)
	return gateway.delegates[delegateAddress] and gateway.delegates[delegateAddress].delegatedStake > 0
end

---@class Delegation
---@field type string # The type of the object.
---@field type string # The type of the object. Either "stake" or "vault"
---@field gatewayAddress string # The address of the gateway the delegation is associated with
---@field delegateStake number|nil # The amount of stake delegated to the gateway if type is "stake"
---@field startTimestamp number # The start timestamp of the delegation's initial stake or the vault's creation
---@field messageId string|nil # The message ID associated with the vault's creation if type is "vault"
---@field balance number|nil # The balance of the vault if type is "vault"
---@field endTimestamp number|nil # The end timestamp of the vault if type is "vault"
---@field delegationId string # The unique ID of the delegation

--- Fetch a flattened array of all the delegations (stakes and vaults) present across all gateways for the given address
--- NOTE(review): delegationId is gatewayAddress .. "_" .. startTimestamp for both
--- stakes and vaults, so a stake and a vault created at the same timestamp on the
--- same gateway share an id — confirm cursoring tolerates that.
--- @param address string The address of the delegator
--- @return Delegation[] # A table of all the address's staked and vaulted delegations
function gar.getFlattenedDelegations(address)
	return utils.reduce(gar.getDelegations(address), function(acc, gatewayAddress, delegation)
		table.insert(acc, {
			type = "stake",
			gatewayAddress = gatewayAddress,
			balance = delegation.delegatedStake,
			startTimestamp = delegation.startTimestamp,
			delegationId = gatewayAddress .. "_" .. delegation.startTimestamp,
		})
		for vaultId, vault in pairs(delegation.vaults) do
			table.insert(acc, {
				type = "vault",
				gatewayAddress = gatewayAddress,
				startTimestamp = vault.startTimestamp,
				vaultId = vaultId,
				balance = vault.balance,
				endTimestamp = vault.endTimestamp,
				delegationId = gatewayAddress .. "_" .. vault.startTimestamp,
			})
		end
		return acc
	end, {})
end

--- Fetch a heterogenous array of all active and vaulted delegated stakes, cursored on startTimestamp
--- @param address string The address of the delegator
--- @param cursor string|number|nil The cursor after which to fetch more stakes (optional)
--- @param limit number The max number of stakes to fetch
--- @param sortBy string The field to sort by. Default is "startTimestamp"
--- @param sortOrder string The order to sort by, either "asc" or "desc". Default is "asc"
--- @return PaginatedTable # A table containing the paginated stakes and pagination metadata as Delegation objects
function gar.getPaginatedDelegations(address, cursor, limit, sortBy, sortOrder)
	local delegationsArray = gar.getFlattenedDelegations(address)
	return utils.paginateTableWithCursor(
		delegationsArray,
		cursor,
		"delegationId",
		limit,
		sortBy or "startTimestamp",
		sortOrder or "asc"
	)
end

--- Tracks per-delegator redelegation activity used to compute escalating fees.
--- @type { [string]: { timestamp: number, redelegations: number } }
Redelegations = Redelegations or {}

--- Prunes redelegation fee records older than the fee-reset interval.
--- @param currentTimestamp number The current timestamp
--- @return string[] # the delegator addresses whose fee data was reset
function gar.pruneRedelegationFeeData(currentTimestamp)
	local delegatorsWithFeesReset = {}
	if not NextRedelegationsPruneTimestamp or currentTimestamp < NextRedelegationsPruneTimestamp then
		-- No known pruning work to do
		return delegatorsWithFeesReset
	end

	-- records older than this have aged past the fee-reset interval
	local pruningThreshold = currentTimestamp
		- constants.DEFAULT_GAR_SETTINGS.redelegations.redelegationFeeResetIntervalMs

	-- reset the next prune timestamp, below will populate it with the next prune timestamp minimum
	NextRedelegationsPruneTimestamp = nil

	Redelegations = utils.reduce(gar.getRedelgationsUnsafe(), function(acc, delegateAddress, redelegationData)
		if redelegationData.timestamp > pruningThreshold then
			-- still within the fee window: keep and schedule its future expiry
			acc[delegateAddress] = redelegationData
			gar.scheduleNextRedelegationsPruning(
				redelegationData.timestamp
					+ constants.DEFAULT_GAR_SETTINGS.redelegations.redelegationFeeResetIntervalMs
			)
		else
			table.insert(delegatorsWithFeesReset, delegateAddress)
		end
		return acc
	end, {})

	return delegatorsWithFeesReset
end

--- Returns a deep copy of the redelegation fee records.
--- @return table # copy of Redelegations
function gar.getRedelgations()
	return utils.deepCopy(Redelegations)
end

--- Returns the live (mutable) redelegation fee records table.
--- @return table # the Redelegations table itself
function gar.getRedelgationsUnsafe()
	return Redelegations
end

--- Returns a copy of a single delegator's redelegation fee record.
--- @param delegateAddress string
function gar.getRedelegation(delegateAddress)
	return gar.getRedelgations()[delegateAddress]
end

--- Returns the live redelegation fee record for a single delegator.
--- @param delegateAddress string
function gar.getRedelegationUnsafe(delegateAddress)
	return gar.getRedelgationsUnsafe()[delegateAddress]
end

-- Correctly-spelled aliases for the misspelled accessors above ("Redelgations").
-- The misspelled names are kept because existing code in this module calls them;
-- new code should prefer these aliases.
gar.getRedelegations = gar.getRedelgations
gar.getRedelegationsUnsafe = gar.getRedelgationsUnsafe

--- @class RedelegateStakeParams
--- @field delegateAddress string # The address of the delegate to redelegate stake from (required)
--- @field sourceAddress string # The address of the gateway to redelegate stake from (required)
--- @field targetAddress string # The address of the gateway to redelegate stake to (required)
--- @field qty number # The amount of stake to redelegate - must be positive integer (required)
--- @field currentTimestamp number # The current timestamp (required)
--- @field vaultId string | nil # The vault id to redelegate from (optional)

--- @class RedelegateStakeResult
--- @field sourceAddress WalletAddress # The address of the gateway that the stake was moved from
--- @field targetAddress table # The address of the gateway that the stake was moved to
--- @field redelegationFee number # The fee charged for the redelegation
--- @field feeResetTimestamp number # The timestamp when the redelegation fee will be reset
--- @field redelegationsSinceFeeReset number # The number of redelegations the user has made since the last fee reset

--- Take stake from a delegate and stake it to a new delegate.
--- This function will be called by the delegate to redelegate their stake to a new gateway.
--- The delegated stake will be moved from the old gateway to the new gateway.
--- It will fail if there is no or not enough delegated stake to move from the gateway.
+ --- It will fail if the old gateway does not meet the minimum staking requirements after the stake is moved. + --- It can move stake from the vaulted stake + --- It can move stake from its own stake as long as it meets the minimum staking requirements after the stake is moved. + --- @param params RedelegateStakeParams + --- @return RedelegateStakeResult + function gar.redelegateStake(params) + local delegateAddress = params.delegateAddress + local targetAddress = params.targetAddress + local sourceAddress = params.sourceAddress + local stakeToTakeFromSource = params.qty + local currentTimestamp = params.currentTimestamp + local vaultId = params.vaultId + + assert(type(stakeToTakeFromSource) == "number", "Quantity is required and must be a number") + assert(stakeToTakeFromSource > 0, "Quantity must be greater than 0") + assert(utils.isValidAddress(targetAddress, true), "Target address is required and must be a string") + assert(utils.isValidAddress(sourceAddress, true), "Source address is required and must be a string") + assert(utils.isValidAddress(delegateAddress, true), "Delegate address is required and must be a string") + assert(type(currentTimestamp) == "number", "Current timestamp is required and must be a number") + assert(sourceAddress ~= targetAddress, "Source and target gateway addresses must be different.") + + local sourceGateway = gar.getGateway(sourceAddress) + local targetGateway = gar.getGateway(targetAddress) + + assert(sourceGateway, "Source Gateway not found") + assert(targetGateway, "Target Gateway not found") + assert( + targetGateway.status ~= "leaving", + "Target Gateway is leaving the network and cannot have more stake delegated to it." + ) + assert(targetGateway.settings.allowDelegatedStaking, "Target Gateway does not allow delegated staking.") + assert( + gar.delegateAllowedToStake(delegateAddress, targetGateway), + "This Gateway does not allow this delegate to stake." 
+ ) + + local redelegationFeeRate = gar.getRedelegationFee(delegateAddress).redelegationFeeRate + local redelegationFee = math.ceil(stakeToTakeFromSource * (redelegationFeeRate / 100)) + local stakeToDelegate = stakeToTakeFromSource - redelegationFee + + assert( + stakeToDelegate > 0, + "The redelegation stake amount minus the redelegation fee is too low to redelegate." + ) + + -- Assert source has enough stake to redelegate and remove the stake from the source + if delegateAddress == sourceAddress then + -- check if the gateway can afford to redelegate from itself + + if vaultId then + -- Get the redelegation amount from the operator vault + + local existingVault = sourceGateway.vaults[vaultId] + assert(existingVault, "Vault not found on the operator.") + assert( + existingVault.balance >= stakeToTakeFromSource, + "Quantity must be less than or equal to the vaulted stake amount." + ) + + reduceStakeFromGatewayVault(sourceGateway, stakeToTakeFromSource, vaultId) + else + -- Get the redelegation amount from the operator stakes + local maxWithdraw = sourceGateway.operatorStake - gar.getSettings().operators.minStake + assert( + stakeToTakeFromSource <= maxWithdraw, + "Resulting stake of " + .. sourceGateway.operatorStake - stakeToTakeFromSource + .. " mARIO is not enough to maintain the minimum operator stake of " + .. gar.getSettings().operators.minStake + .. " mARIO" + ) + + sourceGateway.operatorStake = sourceGateway.operatorStake - stakeToTakeFromSource + end + else + local existingDelegate = sourceGateway.delegates[delegateAddress] + assert(existingDelegate, "This delegate has no stake to redelegate.") + + if vaultId then + local existingVault = existingDelegate.vaults[vaultId] + assert(existingVault, "Vault not found on the delegate.") + assert( + existingVault.balance >= stakeToTakeFromSource, + "Quantity must be less than or equal to the vaulted stake amount." 
+ ) + + reduceStakeFromDelegateVault(sourceGateway, delegateAddress, stakeToTakeFromSource, vaultId) + else + -- Check if the delegate has enough stake to redelegate + assert( + existingDelegate.delegatedStake >= stakeToTakeFromSource, + "Quantity must be less than or equal to the delegated stake amount." + ) + + -- Check if the delegate will have enough stake left after re-delegating + local existingStake = existingDelegate.delegatedStake + local requiredMinimumStake = sourceGateway.settings.minDelegatedStake + local maxAllowedToWithdraw = existingStake - requiredMinimumStake + assert( + stakeToTakeFromSource <= maxAllowedToWithdraw or stakeToTakeFromSource == existingStake, + "Remaining delegated stake must be greater than the minimum delegated stake. Adjust the amount or re-delegate all stake." + ) + decreaseDelegateStakeAtGateway(delegateAddress, sourceGateway, stakeToTakeFromSource) + end + end + + -- The stake can now be applied to the targetGateway + if targetAddress == delegateAddress then + -- move the stake to the operator's stake + targetGateway.operatorStake = targetGateway.operatorStake + stakeToDelegate + else + local existingTargetDelegate = targetGateway.delegates[delegateAddress] + local minimumStakeForGatewayAndDelegate + if existingTargetDelegate and existingTargetDelegate.delegatedStake ~= 0 then + -- It already has a stake that is not zero + minimumStakeForGatewayAndDelegate = 1 -- Delegate must provide at least one additional mARIO + else + -- Consider if the operator increases the minimum amount after you've already staked + minimumStakeForGatewayAndDelegate = targetGateway.settings.minDelegatedStake + end + + -- Check if the delegate has enough stake to redelegate + assert( + stakeToDelegate >= minimumStakeForGatewayAndDelegate, + "Quantity must be greater than the minimum delegated stake amount." 
+ ) + + targetGateway.delegates[delegateAddress] = targetGateway.delegates[delegateAddress] + or gar.createDelegateAtGateway(currentTimestamp, targetGateway, delegateAddress) + increaseDelegateStakeAtGateway(targetGateway.delegates[delegateAddress], targetGateway, stakeToDelegate) + end + + -- Move redelegation fee to protocol balance + balances.increaseBalance(ao.id, redelegationFee) + + local previousRedelegations = gar.getRedelegation(delegateAddress) + local redelegationsSinceFeeReset = (previousRedelegations and previousRedelegations.redelegations or 0) + 1 + + -- update the source and target gateways, and the delegator's redelegation fee data + GatewayRegistry[sourceAddress] = sourceGateway + GatewayRegistry[targetAddress] = targetGateway + Redelegations[delegateAddress] = { + timestamp = currentTimestamp, + redelegations = redelegationsSinceFeeReset, + } + gar.scheduleNextRedelegationsPruning( + currentTimestamp + constants.DEFAULT_GAR_SETTINGS.redelegations.redelegationFeeResetIntervalMs + ) + + return { + sourceAddress = sourceAddress, + targetAddress = targetAddress, + redelegationFee = redelegationFee, + feeResetTimestamp = currentTimestamp + + constants.DEFAULT_GAR_SETTINGS.redelegations.redelegationFeeResetIntervalMs, + redelegationsSinceFeeReset = redelegationsSinceFeeReset, + } + end + + function gar.getRedelegationFee(delegateAddress) + local previousRedelegations = gar.getRedelegationUnsafe(delegateAddress) + + local previousRedelegationCount = previousRedelegations and previousRedelegations.redelegations or 0 + --- first one is free, max of 60% + local redelegationFeeRate = math.min(10 * previousRedelegationCount, 60) + + local lastRedelegationTimestamp = previousRedelegations and previousRedelegations.timestamp or nil + local feeResetTimestamp = lastRedelegationTimestamp + and lastRedelegationTimestamp + constants.DEFAULT_GAR_SETTINGS.redelegations.redelegationFeeResetIntervalMs + or nil + + return { + redelegationFeeRate = redelegationFeeRate, + 
feeResetTimestamp = feeResetTimestamp, + } + end + + --- @param gatewayAddress WalletAddress + --- @param cursor string|nil a cursorId to paginate the vaults + --- @param limit number + --- @param sortBy "vaultId"|"startTimestamp"|"endTimestamp"|"balance"|"cursorId"|nil + --- @param sortOrder "asc"|"desc"|nil + --- @return PaginatedTable # A table containing the paginated vaults and pagination metadata + function gar.getPaginatedVaultsForGateway(gatewayAddress, cursor, limit, sortBy, sortOrder) + local unsafeGateway = gar.getGatewayUnsafe(gatewayAddress) + assert(unsafeGateway, "Gateway not found") + + local vaults = utils.reduce(unsafeGateway.vaults, function(acc, vaultId, vault) + table.insert(acc, { + vaultId = vaultId, + cursorId = vaultId .. "_" .. vault.startTimestamp, + balance = vault.balance, + startTimestamp = vault.startTimestamp, + endTimestamp = vault.endTimestamp, + }) + return acc + end, {}) + + return utils.paginateTableWithCursor( + vaults, + cursor, + "cursorId", + limit, + sortBy or "startTimestamp", + sortOrder or "asc" + ) + end + + --- @param gateway Gateway + --- @param vaultId WalletAddress | MessageId + --- @param qty mARIO + --- @param currentTimestamp Timestamp + function createGatewayWithdrawVault(gateway, vaultId, qty, currentTimestamp) + assert(not gateway.vaults[vaultId], "Vault already exists") + + gateway.vaults[vaultId] = { + balance = qty, + startTimestamp = currentTimestamp, + endTimestamp = currentTimestamp + gar.getSettings().operators.withdrawLengthMs, + } + end + + --- @param gateway Gateway + --- @param qty mARIO + --- @param currentTimestamp Timestamp + --- @param gatewayAddress WalletAddress + function createGatewayExitVault(gateway, qty, currentTimestamp, gatewayAddress) + assert(not gateway.vaults[gatewayAddress], "Exit Vault already exists") + -- This vault is protected, and cannot be instantly withdrawn. It MUST use the gateway address as the vault id to prevent instant withdrawals. 
+ gateway.vaults[gatewayAddress] = { + balance = qty, + startTimestamp = currentTimestamp, + endTimestamp = currentTimestamp + gar.getSettings().operators.leaveLengthMs, + } + end + + --- @param gateway Gateway + --- @param delegateAddress WalletAddress + --- @param vaultId MessageId + --- @param qty mARIO + --- @param currentTimestamp Timestamp + function createDelegateWithdrawVault(gateway, delegateAddress, vaultId, qty, currentTimestamp) + local delegate = gateway.delegates[delegateAddress] + assert(delegate, "Delegate not found") + assert(not delegate.vaults[vaultId], "Vault already exists") + + -- Lock the qty in a vault to be unlocked after withdrawal period and decrease the gateway's total delegated stake + gateway.delegates[delegateAddress].vaults[vaultId] = gar.createDelegateVault(qty, currentTimestamp) + end + + ---@param gateway Gateway + ---@param vaultId MessageId + function cancelGatewayWithdrawVault(gateway, vaultId) + local vault = gateway.vaults[vaultId] + assert(vault, "Vault not found") + gateway.vaults[vaultId] = nil + gateway.operatorStake = gateway.operatorStake + vault.balance + end + + ---@param gateway Gateway + ---@param gatewayAddress WalletAddress + ---@param vaultId MessageId + function unlockGatewayWithdrawVault(gateway, gatewayAddress, vaultId) + local vault = gateway.vaults[vaultId] + assert(vault, "Vault not found") + balances.increaseBalance(gatewayAddress, vault.balance) + gateway.vaults[vaultId] = nil + end + + ---@param gateway Gateway + ---@param delegateAddress WalletAddress + function cancelGatewayDelegateVault(gateway, delegateAddress, vaultId) + local delegate = gateway.delegates[delegateAddress] + assert(delegate, "Delegate not found") + local vault = delegate.vaults[vaultId] + assert(vault, "Vault not found") + assert( + gar.delegateAllowedToStake(delegateAddress, gateway), + "This Gateway does not allow this delegate to stake." 
+ ) + gateway.delegates[delegateAddress].vaults[vaultId] = nil + increaseDelegateStakeAtGateway(delegate, gateway, vault.balance) + end + + ---@param gateway Gateway + ---@param delegateAddress WalletAddress + function unlockGatewayDelegateVault(gateway, delegateAddress, vaultId) + local delegate = gateway.delegates[delegateAddress] + assert(delegate, "Delegate not found") + local vault = delegate.vaults[vaultId] + assert(vault, "Vault not found") + + balances.increaseBalance(delegateAddress, vault.balance) + -- delete the delegate's vault and prune the delegate if necessary + gateway.delegates[delegateAddress].vaults[vaultId] = nil + gar.pruneDelegateFromGatewayIfNecessary(delegateAddress, gateway) + end + + --- @param gateway Gateway + --- @param qty mARIO + --- @param vaultId MessageId + function reduceStakeFromGatewayVault(gateway, qty, vaultId) + local vault = gateway.vaults[vaultId] + assert(vault, "Vault not found") + assert(qty <= vault.balance, "Insufficient balance in vault") + + if qty == vault.balance then + gateway.vaults[vaultId] = nil + else + gateway.vaults[vaultId].balance = vault.balance - qty + end + end + + --- @param gateway Gateway + --- @param delegateAddress WalletAddress + --- @param vaultId MessageId + function reduceStakeFromDelegateVault(gateway, delegateAddress, qty, vaultId) + local delegate = gateway.delegates[delegateAddress] + assert(delegate, "Delegate not found") + local vault = delegate.vaults[vaultId] + assert(vault, "Vault not found") + assert(qty <= vault.balance, "Insufficient balance in vault") + + if qty == vault.balance then + gateway.delegates[delegateAddress].vaults[vaultId] = nil + gar.pruneDelegateFromGatewayIfNecessary(delegateAddress, gateway) + else + gateway.delegates[delegateAddress].vaults[vaultId].balance = vault.balance - qty + end + end + + --- @param timestamp Timestamp + function gar.scheduleNextGatewaysPruning(timestamp) + NextGatewaysPruneTimestamp = math.min(NextGatewaysPruneTimestamp or timestamp, 
timestamp)
	end

	--- @param timestamp Timestamp
	function gar.scheduleNextRedelegationsPruning(timestamp)
		NextRedelegationsPruneTimestamp = math.min(NextRedelegationsPruneTimestamp or timestamp, timestamp)
	end

	function gar.nextGatewaysPruneTimestamp()
		return NextGatewaysPruneTimestamp
	end

	function gar.nextRedelegationsPruneTimestamp()
		return NextRedelegationsPruneTimestamp
	end

	--- @class DelegatesFromAllGateways
	--- @field cursorId string -- delegateAddress_gatewayAddress
	--- @field address WalletAddress
	--- @field gatewayAddress WalletAddress
	--- @field startTimestamp Timestamp
	--- @field delegatedStake mARIO
	--- @field vaultedStake mARIO

	--- @param cursor string|nil -- cursorId of the last item in the previous page
	--- @param limit number
	--- @param sortBy string|nil
	--- @param sortOrder string|nil
	--- @return PaginatedTable
	function gar.getPaginatedDelegatesFromAllGateways(cursor, limit, sortBy, sortOrder)
		--- @type DelegatesFromAllGateways[]
		local allDelegations = {}

		for gatewayAddress, gateway in pairs(gar.getGatewaysUnsafe()) do
			for delegateAddress, delegate in pairs(gateway.delegates) do
				table.insert(allDelegations, {
					cursorId = delegateAddress .. "_" .. gatewayAddress,
					address = delegateAddress,
					gatewayAddress = gatewayAddress,
					startTimestamp = delegate.startTimestamp,
					delegatedStake = delegate.delegatedStake,
					vaultedStake = utils.reduce(delegate.vaults, function(acc, _, vault)
						return acc + vault.balance
					end, 0),
				})
			end
		end

		return utils.paginateTableWithCursor(
			allDelegations,
			cursor,
			"cursorId",
			limit,
			sortBy or "delegatedStake",
			sortOrder or "desc"
		)
	end

	--- @class VaultsFromAllGateways
	--- @field cursorId string -- gatewayAddress_vaultId
	--- @field vaultId MessageId
	--- @field gatewayAddress WalletAddress
	--- @field balance mARIO
	--- @field startTimestamp Timestamp
	--- @field endTimestamp Timestamp

	--- @param cursor string|nil -- cursorId of the last item in the previous page
	--- @param limit number
	--- @param sortBy 'cursorId'|'vaultId'|'gatewayAddress'|'balance'|'startTimestamp'|'endTimestamp'|nil
	--- @param sortOrder string|nil
	--- @return PaginatedTable
	function gar.getPaginatedVaultsFromAllGateways(cursor, limit, sortBy, sortOrder)
		--- @type VaultsFromAllGateways[]
		local allVaults = {}

		local gateways = gar.getGatewaysUnsafe()
		for gatewayAddress, gateway in pairs(gateways) do
			for vaultId, vault in pairs(gateway.vaults) do
				table.insert(allVaults, {
					cursorId = gatewayAddress .. "_" .. vaultId,
					vaultId = vaultId,
					gatewayAddress = gatewayAddress,
					balance = vault.balance,
					startTimestamp = vault.startTimestamp,
					endTimestamp = vault.endTimestamp,
				})
			end
		end

		return utils.paginateTableWithCursor(
			allVaults,
			cursor,
			"cursorId",
			limit,
			sortBy or "startTimestamp",
			sortOrder or "asc"
		)
	end

	return gar
end

_G.package.loaded[".src.gar"] = _loaded_mod_src_gar()

-- module: ".src.vaults"
local function _loaded_mod_src_vaults()
	local balances = require(".src.balances")
	local utils = require(".src.utils")
	local constants = require(".src.constants")
	local vaults = {}

	-- NOTE(review): the bundled copy had this alias's generic parameters stripped ("table>");
	-- reconstructed from its own description ("indexed by owner address, then by vault id") — confirm against .src/vaults.lua
	--- @alias Vaults table<WalletAddress, table<string, Vault>> -- A table of vaults indexed by owner address, then by vault id

	--- @class Vault
	--- @field balance mARIO The balance of the vault
	--- @field controller WalletAddress | nil The controller of a revokable vault. Nil if not revokable (default: nil)
	--- @field startTimestamp Timestamp The start timestamp of the vault
	--- @field endTimestamp Timestamp The end timestamp of the vault

	--- Creates a vault
	--- @param from string The address of the owner
	--- @param qty number The quantity of tokens to vault
	--- @param lockLengthMs number The lock length in milliseconds
	--- @param currentTimestamp number The current timestamp
	--- @param vaultId string The vault id
	--- @return Vault -- The created vault
	function vaults.createVault(from, qty, lockLengthMs, currentTimestamp, vaultId)
		assert(qty > 0, "Quantity must be greater than 0")
		assert(not vaults.getVault(from, vaultId), "Vault with id " .. vaultId .. " already exists")
		assert(balances.walletHasSufficientBalance(from, qty), "Insufficient balance")
		assert(
			lockLengthMs >= constants.MIN_TOKEN_LOCK_TIME_MS and lockLengthMs <= constants.MAX_TOKEN_LOCK_TIME_MS,
			"Invalid lock length. Must be between "
				.. constants.MIN_TOKEN_LOCK_TIME_MS
				.. " - "
				.. constants.MAX_TOKEN_LOCK_TIME_MS
				..
" ms" + ) + balances.reduceBalance(from, qty) + local newVault = vaults.setVault(from, vaultId, { + balance = qty, + startTimestamp = currentTimestamp, + endTimestamp = currentTimestamp + lockLengthMs, + }) + return newVault + end + + --- Vaults a transfer + --- @param from string The address of the owner + --- @param recipient string The address of the recipient + --- @param qty number The quantity of tokens to vault + --- @param lockLengthMs number The lock length in milliseconds + --- @param currentTimestamp number The current timestamp + --- @param vaultId string The vault id + --- @param allowUnsafeAddresses boolean|nil Whether to allow unsafe addresses, since this results in funds eventually being sent to an invalid address + --- @param revokable boolean|nil Whether the vault is revokable. Defaults to nil + --- @return Vault -- The created vault + function vaults.vaultedTransfer( + from, + recipient, + qty, + lockLengthMs, + currentTimestamp, + vaultId, + allowUnsafeAddresses, + revokable + ) + assert(utils.isValidAddress(recipient, allowUnsafeAddresses), "Invalid recipient") + assert(qty > 0, "Quantity must be greater than 0") + assert(recipient ~= from, "Cannot transfer to self") + assert(balances.walletHasSufficientBalance(from, qty), "Insufficient balance") + assert(not vaults.getVault(recipient, vaultId), "Vault with id " .. vaultId .. " already exists") + assert( + lockLengthMs >= constants.MIN_TOKEN_LOCK_TIME_MS and lockLengthMs <= constants.MAX_TOKEN_LOCK_TIME_MS, + "Invalid lock length. Must be between " + .. constants.MIN_TOKEN_LOCK_TIME_MS + .. " - " + .. constants.MAX_TOKEN_LOCK_TIME_MS + .. 
" ms" + ) + + balances.reduceBalance(from, qty) + local newVault = vaults.setVault(recipient, vaultId, { + balance = qty, + startTimestamp = currentTimestamp, + endTimestamp = currentTimestamp + lockLengthMs, + controller = revokable and from or nil, + }) + return newVault + end + + --- Revokes a vaulted transfer back to the controller + ---@param controller WalletAddress The address of the controller of a revokable vault. This is the signer of the vaultedTransfer + ---@param recipient WalletAddress The address of the recipient of the vaultedTransfer + ---@param vaultId VaultId The id of the vault to revoke + ---@param currentTimestamp Timestamp The current timestamp + ---@return Vault + function vaults.revokeVault(controller, recipient, vaultId, currentTimestamp) + local vault = vaults.getVault(recipient, vaultId) + assert(vault, "Vault not found.") + assert(vault.controller == controller, "Only the controller can revoke the vault.") + assert(currentTimestamp < vault.endTimestamp, "Vault has ended.") + + balances.increaseBalance(controller, vault.balance) + Vaults[recipient][vaultId] = nil + return vault + end + + --- Extends a vault + --- @param from string The address of the owner + --- @param extendLengthMs number The extension length in milliseconds + --- @param currentTimestamp number The current timestamp + --- @param vaultId string The vault id + --- @return Vault The extended vault + function vaults.extendVault(from, extendLengthMs, currentTimestamp, vaultId) + local vault = vaults.getVault(from, vaultId) + assert(vault, "Vault not found.") + assert(currentTimestamp <= vault.endTimestamp, "Vault has ended.") + assert(extendLengthMs > 0, "Invalid extend length. Must be a positive number.") + + local totalTimeRemaining = vault.endTimestamp - currentTimestamp + local totalTimeRemainingWithExtension = totalTimeRemaining + extendLengthMs + assert( + totalTimeRemainingWithExtension <= constants.MAX_TOKEN_LOCK_TIME_MS, + "Invalid vault extension. 
Total lock time cannot be greater than " + .. constants.MAX_TOKEN_LOCK_TIME_MS + .. " ms" + ) + + vault.endTimestamp = vault.endTimestamp + extendLengthMs + Vaults[from][vaultId] = vault + + --- The NextPruneTimestamp might have been from this vault, but figuring out which one + --- comes next is a linear walk of the vaults anyway, so just leave it as is and the next + --- prune will figure it out. + return vault + end + + --- Increases a vault + --- @param from string The address of the owner + --- @param qty number The quantity of tokens to increase the vault by + --- @param vaultId string The vault id + --- @param currentTimestamp number The current timestamp + --- @return Vault The increased vault + function vaults.increaseVault(from, qty, vaultId, currentTimestamp) + assert(qty > 0, "Quantity must be greater than 0") + assert(balances.walletHasSufficientBalance(from, qty), "Insufficient balance") + + local vault = vaults.getVault(from, vaultId) + assert(vault, "Vault not found.") + assert(currentTimestamp <= vault.endTimestamp, "Vault has ended.") + + balances.reduceBalance(from, qty) + vault.balance = vault.balance + qty + Vaults[from][vaultId] = vault + return vault + end + + --- Gets all vaults + --- @return Vaults The vaults + function vaults.getVaults() + return utils.deepCopy(Vaults) or {} + end + + function vaults.getVaultsUnsafe() + return Vaults or {} + end + + --- @class WalletVault + --- @field address string - the wallet address that owns the vault + --- @field vaultId string - the unique id of the vault + --- @field startTimestamp number - the timestamp in ms of the vault started + --- @field endTimestamp number - the ending timestamp of the vault + --- @field balance number - the number of mARIO stored in the vault + + --- Gets all paginated vaults + --- @param cursor string|nil The address to start from + --- @param limit number Max number of results to return + --- @param sortOrder string "asc" or "desc" sort direction + --- @param sortBy 
string|nil "address", "vaultId", "balance", "startTimestamp", "endTimestamp" field to sort by + --- @return WalletVault[] - array of wallet vaults indexed by address and vault id + function vaults.getPaginatedVaults(cursor, limit, sortOrder, sortBy) + local allVaults = vaults.getVaultsUnsafe() + local cursorField = "vaultId" + + local vaultsArray = utils.reduce(allVaults, function(acc, address, vaultsForAddress) + for vaultId, vault in pairs(vaultsForAddress) do + table.insert(acc, { + address = address, + controller = vault.controller, + vaultId = vaultId, + balance = vault.balance, + startTimestamp = vault.startTimestamp, + endTimestamp = vault.endTimestamp, + }) + end + return acc + end, {}) + + return utils.paginateTableWithCursor(vaultsArray, cursor, cursorField, limit, sortBy or "address", sortOrder) + end + + --- Gets a vault + --- @param target string The address of the owner + --- @param id string The vault id + --- @return Vault| nil The vault + function vaults.getVault(target, id) + return Vaults[target] and Vaults[target][id] + end + + --- Removes a vault + --- @param owner string The address of the owner + --- @param id string The vault id + --- @return Vault|nil # The removed vault + function vaults.removeVault(owner, id) + if not Vaults[owner] then + return nil + end + local removedVault = utils.deepCopy(Vaults[owner][id]) + Vaults[owner][id] = nil + return removedVault + end + + --- Sets a vault + --- @param target string The address of the owner + --- @param id string The vault id + --- @param vault Vault The vault + --- @return Vault The vault + function vaults.setVault(target, id, vault) + -- create the top key first if not exists + if not Vaults[target] then + Vaults[target] = {} + end + -- set the vault + Vaults[target][id] = vault + vaults.scheduleNextVaultsPruning(vault.endTimestamp) + return vault + end + + --- Prunes expired vaults + --- @param currentTimestamp number The current timestamp + --- @return Vault[] The pruned vaults + function 
vaults.pruneVaults(currentTimestamp) + if not NextBalanceVaultsPruneTimestamp or currentTimestamp < NextBalanceVaultsPruneTimestamp then + -- No known pruning work to do + return {} + end + + local prunedVaults = {} + + -- reset the next prune timestamp, below will populate it with the next prune timestamp minimum + NextBalanceVaultsPruneTimestamp = nil + + -- note: use unsafe to avoid copying all the vaults, directly update the vaults table + for owner, ownersVaults in pairs(vaults.getVaultsUnsafe()) do + for id, nestedVault in pairs(ownersVaults) do + if currentTimestamp >= nestedVault.endTimestamp then + balances.increaseBalance(owner, nestedVault.balance) + prunedVaults[id] = vaults.removeVault(owner, id) + else + vaults.scheduleNextVaultsPruning(nestedVault.endTimestamp) + end + end + end + return prunedVaults + end + + --- @param timestamp Timestamp + function vaults.scheduleNextVaultsPruning(timestamp) + -- A nil NextPruneTimestamp means we're not expecting anything to prune, so set it if necessary + -- Otherwise, this new endTimestamp might be earlier than the next known for pruning. If so, set it. 
-- (tail of vaults.scheduleNextVaultsPruning) keep the earliest known prune time
		NextBalanceVaultsPruneTimestamp = math.min(NextBalanceVaultsPruneTimestamp or timestamp, timestamp)
	end

	--- @return Timestamp|nil # the next timestamp at which balance vaults need pruning, if any is scheduled
	function vaults.nextVaultsPruneTimestamp()
		return NextBalanceVaultsPruneTimestamp
	end

	return vaults
end

_G.package.loaded[".src.vaults"] = _loaded_mod_src_vaults()

-- module: ".src.token"
local function _loaded_mod_src_token()
	local balances = require(".src.balances")
	local gar = require(".src.gar")
	local vaults = require(".src.vaults")
	local token = {}

	--- @return mARIO # returns the last computed total supply, this is to avoid recomputing the total supply every time, and only when requested
	function token.lastKnownTotalTokenSupply()
		return LastKnownCirculatingSupply
			+ LastKnownLockedSupply
			+ LastKnownStakedSupply
			+ LastKnownDelegatedSupply
			+ LastKnownWithdrawSupply
			-- fix: guard against a missing protocol balance entry (nil would raise an
			-- arithmetic error); mirrors the `Balances[ao.id] or 0` guard used by hb.patchBalances
			+ (Balances[ao.id] or 0)
	end

	--- @class BalanceObjectTallies
	--- @field numAddressesVaulting number
	--- @field numBalanceVaults number
	--- @field numBalances number

	--- @class GatewayObjectTallies
	--- @field numDelegateVaults number
	--- @field numDelegatesVaulting number
	--- @field numDelegations number
	--- @field numDelegates number
	--- @field numExitingDelegations number
	--- @field numGatewayVaults number
	--- @field numGatewaysVaulting number
	--- @field numGateways number
	--- @field numExitingGateways number

	--- @class StateObjectTallies : GatewayObjectTallies, BalanceObjectTallies

	--- @class TotalSupplyDetails
	--- @field totalSupply number
	--- @field circulatingSupply number
	--- @field lockedSupply number
	--- @field stakedSupply number
	--- @field delegatedSupply number
	--- @field withdrawSupply number
	--- @field protocolBalance number
	--- @field stateObjectTallies StateObjectTallies

	--- Crawls the state to compute the total supply and update the last known values
	--- @return TotalSupplyDetails
	function token.computeTotalSupply()
		-- add all the balances
		local totalSupply = 0
		local circulatingSupply = 0
		local lockedSupply = 0
		local stakedSupply = 0
		local delegatedSupply = 0
		local withdrawSupply = 0
		local protocolBalance = balances.getBalance(ao.id)
		local userBalances = balances.getBalancesUnsafe()
		--- @type StateObjectTallies
		local stateObjectTallies = {
			numAddressesVaulting = 0,
			numBalanceVaults = 0,
			numBalances = 0,
			numDelegateVaults = 0,
			numDelegatesVaulting = 0,
			numDelegates = 0,
			numDelegations = 0,
			numExitingDelegations = 0,
			numGatewayVaults = 0,
			numGatewaysVaulting = 0,
			numGateways = 0,
			numExitingGateways = 0,
		}

		-- tally circulating supply
		for walletAddress, balance in pairs(userBalances) do
			-- clean up 0 balances opportunistically
			-- (removing an existing key during pairs traversal is allowed in Lua)
			if balance > 0 then
				circulatingSupply = circulatingSupply + balance
				stateObjectTallies.numBalances = stateObjectTallies.numBalances + 1
			else
				Balances[walletAddress] = nil
			end
		end
		-- the protocol's own balance is not part of the circulating supply
		circulatingSupply = circulatingSupply - protocolBalance
		totalSupply = totalSupply + protocolBalance + circulatingSupply

		-- tally supply stashed in gateways and delegates
		local uniqueDelegates = {}
		for _, gateway in pairs(gar.getGatewaysUnsafe()) do
			if gateway.status == "leaving" then
				stateObjectTallies.numExitingGateways = stateObjectTallies.numExitingGateways + 1
			else
				stateObjectTallies.numGateways = stateObjectTallies.numGateways + 1
			end
			totalSupply = totalSupply + gateway.operatorStake + gateway.totalDelegatedStake
			stakedSupply = stakedSupply + gateway.operatorStake
			delegatedSupply = delegatedSupply + gateway.totalDelegatedStake
			for delegateAddress, delegate in pairs(gateway.delegates) do
				-- a delegatedStake of 0 marks a delegation that is exiting (withdrawing)
				if delegate.delegatedStake == 0 then
					stateObjectTallies.numExitingDelegations = stateObjectTallies.numExitingDelegations + 1
				else
					stateObjectTallies.numDelegations = stateObjectTallies.numDelegations + 1
					-- count each delegate address once even if delegated to several gateways
					if not uniqueDelegates[delegateAddress] then
						uniqueDelegates[delegateAddress] = true
						stateObjectTallies.numDelegates = stateObjectTallies.numDelegates + 1
					end
				end

				-- tally delegates' vaults
				for _, vault in pairs(delegate.vaults) do
					stateObjectTallies.numDelegateVaults = stateObjectTallies.numDelegateVaults + 1
					totalSupply = totalSupply + vault.balance
					withdrawSupply = withdrawSupply + vault.balance
				end
				if next(delegate.vaults) then
					stateObjectTallies.numDelegatesVaulting = stateObjectTallies.numDelegatesVaulting + 1
				end
			end
			-- tally gateway's own vaults
			for _, vault in pairs(gateway.vaults) do
				stateObjectTallies.numGatewayVaults = stateObjectTallies.numGatewayVaults + 1
				totalSupply = totalSupply + vault.balance
				withdrawSupply = withdrawSupply + vault.balance
			end
			if next(gateway.vaults) then
				stateObjectTallies.numGatewaysVaulting = stateObjectTallies.numGatewaysVaulting + 1
			end
		end

		-- user vaults
		local userVaults = vaults.getVaultsUnsafe()
		for _, vaultsForAddress in pairs(userVaults) do
			if next(vaultsForAddress) ~= nil then
				stateObjectTallies.numAddressesVaulting = stateObjectTallies.numAddressesVaulting + 1
			end
			-- they may have several vaults; iterate through them
			for _, vault in pairs(vaultsForAddress) do
				stateObjectTallies.numBalanceVaults = stateObjectTallies.numBalanceVaults + 1
				totalSupply = totalSupply + vault.balance
				lockedSupply = lockedSupply + vault.balance
			end
		end

		-- persist the freshly computed components for cheap reads elsewhere
		LastKnownCirculatingSupply = circulatingSupply
		LastKnownLockedSupply = lockedSupply
		LastKnownStakedSupply = stakedSupply
		LastKnownDelegatedSupply = delegatedSupply
		LastKnownWithdrawSupply = withdrawSupply
		TotalSupply = totalSupply
		return {
			totalSupply = totalSupply,
			circulatingSupply = circulatingSupply,
			lockedSupply = lockedSupply,
			stakedSupply = stakedSupply,
			delegatedSupply = delegatedSupply,
			withdrawSupply = withdrawSupply,
			protocolBalance = protocolBalance,
			stateObjectTallies = stateObjectTallies,
		}
	end

	return token
end

_G.package.loaded[".src.token"] = _loaded_mod_src_token()

-- module: ".src.demand"
local function _loaded_mod_src_demand()
	local utils = require(".src.utils")
	local demand = {}

	--- @class DemandFactor
	--- @field currentPeriod number The current period
	--- @field trailingPeriodPurchases number[] The trailing period purchases
	--- @field trailingPeriodRevenues number[] The trailing period revenues
	--- @field purchasesThisPeriod number The current period purchases
	--- @field revenueThisPeriod number The current period revenue
	--- @field currentDemandFactor number The current demand factor
	--- @field consecutivePeriodsWithMinDemandFactor number The number of consecutive periods with the minimum demand factor
	--- @field fees table The fees for each name length

	--- @class DemandFactorSettings
	--- @field periodZeroStartTimestamp number The timestamp of the start of period zero
	--- @field movingAvgPeriodCount number The number of periods to use for the moving average
	--- @field periodLengthMs number The length of a period in milliseconds
	--- @field demandFactorBaseValue number The base demand factor value that is what the demand factor is reset to when fees are reset
	--- @field demandFactorMin number The minimum demand factor value
	--- @field demandFactorUpAdjustmentRate number The adjustment to the demand factor when it is increasing
	--- @field demandFactorDownAdjustmentRate number The adjustment to the demand factor when it is decreasing
	--- @field maxPeriodsAtMinDemandFactor number The threshold for the number of consecutive periods with the minimum demand factor before adjusting the demand factor
	--- @field criteria 'revenue' | 'purchases' The criteria to use for determining if the demand is increasing

	--- Tally a name purchase
	--- @param qty number The quantity of the purchase
	function demand.tallyNamePurchase(qty)
		-- NOTE(review): despite its name, incrementPurchasesThisPeriodRevenue bumps the
		-- purchase COUNT (by 1); the revenue is tallied separately below
		demand.incrementPurchasesThisPeriodRevenue(1)
		demand.incrementRevenueThisPeriod(qty)
	end

	--- Gets the base fee for a given name length
	--- @param nameLength number The length of the name
	--- @return number #The base fee for the name length
	function demand.baseFeeForNameLength(nameLength)
		assert(utils.isInteger(nameLength) and nameLength > 0, "nameLength must be a positive integer")
		local fee = demand.getFees()[nameLength]
		assert(fee, "No fee found for name length: " .. nameLength)
		return fee
	end

	--- Gets the moving average of trailing purchase counts
	--- @return number # The moving average of trailing purchase counts
	function demand.mvgAvgTrailingPurchaseCounts()
		local sum = 0
		local trailingPeriodPurchases = demand.getTrailingPeriodPurchases()
		for i = 1, #trailingPeriodPurchases do
			sum = sum + trailingPeriodPurchases[i]
		end
		return sum / #trailingPeriodPurchases
	end

	--- Gets the moving average of trailing revenues
	--- @return number # The moving average of trailing revenues
	function demand.mvgAvgTrailingRevenues()
		local sum = 0
		local trailingPeriodRevenues = demand.getTrailingPeriodRevenues()
		for i = 1, #trailingPeriodRevenues do
			sum = sum + trailingPeriodRevenues[i]
		end
		return sum / #trailingPeriodRevenues
	end

	--- Checks if the demand is increasing
	--- @return boolean # true if the demand is increasing, false otherwise
	function demand.isDemandIncreasing()
		local settings = demand.getSettings()

		-- check that we have settings
		if not settings then
			print("No settings found")
			return false
		end

		local purchasesInCurrentPeriod = demand.getCurrentPeriodPurchases()
		local revenueInCurrentPeriod = demand.getCurrentPeriodRevenue()
		local mvgAvgOfTrailingNamePurchases = demand.mvgAvgTrailingPurchaseCounts()
		local mvgAvgOfTrailingRevenue = demand.mvgAvgTrailingRevenues()

		if settings.criteria == "revenue" then
			return revenueInCurrentPeriod > 0 and (revenueInCurrentPeriod > mvgAvgOfTrailingRevenue)
		else
			return purchasesInCurrentPeriod > 0 and (purchasesInCurrentPeriod > mvgAvgOfTrailingNamePurchases)
		end
	end

	--- Checks if the demand should update the demand factor for a given timestamp
	--- @param timestamp number The timestamp to check
	--- @return boolean shouldUpdate # True if the period for the timestamp is greater than the current period, false otherwise
	function demand.shouldUpdateDemandFactor(timestamp)
		assert(timestamp, "Timestamp must be provided")
		local settings = demand.getSettings()

		if not settings or not settings.periodZeroStartTimestamp then
			return false
		end

		if timestamp < settings.periodZeroStartTimestamp then
			return false
		end

		local calculatedPeriod = demand.getPeriodForTimestamp(timestamp)
		return calculatedPeriod > demand.getCurrentPeriod()
	end

	--- Gets the demand factor info
	--- @return DemandFactor # The demand factor info
	function demand.getDemandFactorInfo()
		return utils.deepCopy(DemandFactor)
	end

	--- Gets the period for the timestamp, 1 based index
	--- @param timestamp number The timestamp to get the period for
	--- @return number # The period for the timestamp, 1 based index
	function demand.getPeriodForTimestamp(timestamp)
		return math.floor(
			(timestamp - demand.getSettings().periodZeroStartTimestamp) / demand.getSettings().periodLengthMs
		) + 1
	end

	--- Gets the timestamp for the period, 1 based index
	--- @param period number The period to get the timestamp for
	--- @return number # The timestamp for the period, 1 based index
	function demand.getTimestampForPeriod(period)
		return demand.getSettings().periodZeroStartTimestamp + (period - 1) * demand.getSettings().periodLengthMs
	end

	--- Updates the demand factor and returns the updated demand factor to the current period. If multiple periods need to be updated, this function will call itself multiple times.
	--- @param currentTimestamp number The current timestamp
	--- @return number|nil demandFactor The demand factor, updated if necessary, nil if no update is necessary
	--- @return table updatedDemandFactors The updated demand factors
	function demand.updateDemandFactor(currentTimestamp)
		assert(currentTimestamp, "Timestamp must be provided")
		local settings = demand.getSettings()
		local periodForCurrentTimestamp = demand.getPeriodForTimestamp(currentTimestamp)
		local lastKnownPeriod = demand.getCurrentPeriod()
		local updatedDemandFactors = {} --- table tracking the period and the generated demand factor for each period

		-- we didn't update the demand factor for this period, return nil to prevent a notice being sent
		if periodForCurrentTimestamp == lastKnownPeriod then
			return nil, {}
		end

		-- update the demand factor for each period between the last known period and the current period
		for periodToUpdate = lastKnownPeriod + 1, periodForCurrentTimestamp do
			local timestamp = demand.getTimestampForPeriod(periodToUpdate)
			if demand.shouldUpdateDemandFactor(timestamp) then
				if demand.isDemandIncreasing() then
					local upAdjustment = settings.demandFactorUpAdjustmentRate
					local unroundedUpdatedDemandFactor = demand.getDemandFactor() * (1 + upAdjustment)
					local updatedDemandFactor = utils.roundToPrecision(unroundedUpdatedDemandFactor, 5)
					demand.setDemandFactor(updatedDemandFactor)
				else
					if demand.getDemandFactor() > settings.demandFactorMin then
						local downAdjustment = settings.demandFactorDownAdjustmentRate
						local unroundedUpdatedDemandFactor = demand.getDemandFactor() * (1 - downAdjustment)
						local updatedDemandFactor = utils.roundToPrecision(unroundedUpdatedDemandFactor, 5)
						demand.setDemandFactor(updatedDemandFactor)
					end
				end

				if demand.getDemandFactor() <= settings.demandFactorMin then
					if demand.getConsecutivePeriodsWithMinDemandFactor() >= settings.maxPeriodsAtMinDemandFactor then
						print(
							settings.maxPeriodsAtMinDemandFactor
								.. " consecutive periods at min demand factor. Resetting demand factor and fees."
						)
						demand.updateFees(settings.demandFactorMin)
						demand.setDemandFactor(settings.demandFactorBaseValue)
						demand.resetConsecutivePeriodsWithMinimumDemandFactor()
					else
						demand.incrementConsecutivePeriodsWithMinDemandFactor(1)
					end
				end

				-- update the current period values in the ring buffer for previous periods
				demand.updateTrailingPeriodPurchases()
				demand.updateTrailingPeriodRevenues()
				demand.resetPurchasesThisPeriod()
				demand.resetRevenueThisPeriod()
				demand.incrementCurrentPeriod(1)
				table.insert(updatedDemandFactors, {
					-- fix: record the period that was actually updated; previously every entry
					-- was labeled with lastKnownPeriod (the pre-update period)
					period = periodToUpdate,
					demandFactor = demand.getDemandFactor(),
				})
			end
		end

		-- return the demand factor for the current period
		return demand.getDemandFactor(), updatedDemandFactors
	end

	--- Updates the fees
	--- @param multiplier number The multiplier for the fees
	--- @return table # The updated fees
	function demand.updateFees(multiplier)
		local currentFees = demand.getFees()
		-- update all fees multiply them by the demand factor minimum
		for nameLength, fee in pairs(currentFees) do
			local updatedFee = fee * multiplier
			DemandFactor.fees[nameLength] = updatedFee
		end
		return demand.getFees()
	end

	--- Gets the demand factor
	--- @return number # The demand factor
	function demand.getDemandFactor()
		return DemandFactor.currentDemandFactor
	end

	--- Gets the current period revenue
	--- @return number # The current period revenue
	function demand.getCurrentPeriodRevenue()
		return DemandFactor.revenueThisPeriod
	end

	--- Gets the current period purchases
	--- @return number # The current period purchases
	function demand.getCurrentPeriodPurchases()
		local demandFactor = utils.deepCopy(DemandFactor)
		return demandFactor and demandFactor.purchasesThisPeriod or 0
	end

	--- Gets the trailing period purchases
	--- @return table # The trailing period purchases
	function demand.getTrailingPeriodPurchases()
		local demandFactor = utils.deepCopy(DemandFactor)
		return demandFactor and demandFactor.trailingPeriodPurchases or { 0, 0, 0, 0, 0, 0, 0 }
	end

	--- Gets the trailing period revenues
	--- @return table # The trailing period revenues
	function demand.getTrailingPeriodRevenues()
		local demandFactor = utils.deepCopy(DemandFactor)
		return demandFactor and demandFactor.trailingPeriodRevenues or { 0, 0, 0, 0, 0, 0, 0 }
	end

	--- Gets the fees
	--- @return table # The fees
	function demand.getFees()
		local demandFactor = utils.deepCopy(DemandFactor)
		return demandFactor and demandFactor.fees or {}
	end

	--- Gets the settings
	--- @return DemandFactorSettings # The settings
	function demand.getSettings()
		return utils.deepCopy(DemandFactorSettings)
	end

	--- Gets the consecutive periods with minimum demand factor
	--- @return number # The consecutive periods with minimum demand factor
	function demand.getConsecutivePeriodsWithMinDemandFactor()
		return DemandFactor.consecutivePeriodsWithMinDemandFactor
	end

	--- Gets the current period
	--- @return number # The current period
	function demand.getCurrentPeriod()
		return DemandFactor.currentPeriod
	end

	--- Sets the demand factor, ensuring it is not less than the minimum demand factor
	--- @param demandFactor number # The demand factor
	function demand.setDemandFactor(demandFactor)
		DemandFactor.currentDemandFactor = math.max(demandFactor, DemandFactorSettings.demandFactorMin)
	end

	--- Gets the period index
	--- @return number # The period index
	function demand.getPeriodIndex()
		local currentPeriod = demand.getCurrentPeriod()
		local settings = demand.getSettings()
		if not settings then
			return 0
		end
		-- current period is one based index of the current period
		return (currentPeriod % settings.movingAvgPeriodCount) + 1 -- has to be + 1 to avoid zero index
	end

	--- Updates the trailing period purchases
	function demand.updateTrailingPeriodPurchases()
		local periodIndex = demand.getPeriodIndex()
		DemandFactor.trailingPeriodPurchases[periodIndex] = demand.getCurrentPeriodPurchases()
	end

	--- Updates the trailing period revenues
	function demand.updateTrailingPeriodRevenues()
		local periodIndex = demand.getPeriodIndex()
		DemandFactor.trailingPeriodRevenues[periodIndex] = demand.getCurrentPeriodRevenue()
	end

	--- Resets the purchases this period
	function demand.resetPurchasesThisPeriod()
		DemandFactor.purchasesThisPeriod = 0
	end

	--- Resets the revenue this period
	function demand.resetRevenueThisPeriod()
		DemandFactor.revenueThisPeriod = 0
	end

	--- Increments the purchases this period
	--- @param count number The count to increment
	function demand.incrementPurchasesThisPeriodRevenue(count)
		DemandFactor.purchasesThisPeriod = DemandFactor.purchasesThisPeriod + count
	end

	--- Increments the revenue this period
	--- @param revenue number The revenue to increment
	function demand.incrementRevenueThisPeriod(revenue)
		DemandFactor.revenueThisPeriod = DemandFactor.revenueThisPeriod + revenue
	end

	--- Increments the current period
	--- @param count number The count to increment
	function demand.incrementCurrentPeriod(count)
		DemandFactor.currentPeriod = DemandFactor.currentPeriod + count
	end

	--- Resets the consecutive periods with minimum demand factor
	function demand.resetConsecutivePeriodsWithMinimumDemandFactor()
		DemandFactor.consecutivePeriodsWithMinDemandFactor = 0
	end

	--- Increments the consecutive periods with minimum demand factor
	--- @param count number The count to increment
	function demand.incrementConsecutivePeriodsWithMinDemandFactor(count)
		DemandFactor.consecutivePeriodsWithMinDemandFactor = DemandFactor.consecutivePeriodsWithMinDemandFactor + count
	end

	return demand
end

_G.package.loaded[".src.demand"] = _loaded_mod_src_demand()

-- module: ".src.arns"
local function 
_loaded_mod_src_arns() + -- arns.lua + local utils = require(".src.utils") + local constants = require(".src.constants") + local balances = require(".src.balances") + local demand = require(".src.demand") + local gar = require(".src.gar") + local arns = {} + + --- @class NameRegistry + --- @field reserved table The reserved names + --- @field records table The records + --- @field returned table The returned records + + --- @class StoredRecord + --- @field processId string The process id of the record + --- @field startTimestamp number The start timestamp of the record + --- @field type 'lease' | 'permabuy' The type of the record (lease/permabuy) + --- @field undernameLimit number The undername limit of the record + --- @field purchasePrice number The purchase price of the record + --- @field endTimestamp number|nil The end timestamp of the record + + --- @class Record : StoredRecord + --- @field name string The name of the record + + --- @class ReservedName + --- @field name string The name of the reserved record + --- @field target string|nil The address of the target of the reserved record + --- @field endTimestamp number|nil The time at which the record is no longer reserved + + --- @class ReturnedName -- Returned name saved into the registry + --- @field name string The name of the returned record + --- @field initiator WalletAddress + --- @field startTimestamp Timestamp -- The timestamp of when the record was returned + + --- @class ReturnedNameData -- Returned name with endTimestamp and premiumMultiplier + --- @field name string The name of the returned record + --- @field initiator WalletAddress + --- @field startTimestamp Timestamp -- The timestamp of when the record was returned + --- @field endTimestamp Timestamp -- The timestamp of when the record will no longer be in the returned period + --- @field premiumMultiplier number -- The current multiplier for the returned name + + --- @class ReturnedNameBuyRecordResult -- extends above + --- @field initiator 
WalletAddress + --- @field rewardForProtocol mARIO -- The reward for the protocol from the returned name purchase + --- @field rewardForInitiator mARIO -- The reward for the protocol from the returned name purchase + + --- @class RecordInteractionResult + --- @field record Record The updated record + --- @field baseRegistrationFee number The base registration fee + --- @field remainingBalance number The remaining balance + --- @field protocolBalance number The protocol balance + --- @field df table The demand factor info + --- @field fundingPlan FundingPlan The funding plan + --- @field fundingResult table The funding result + --- @field totalFee mARIO The total fee for the name-related operation + + --- @class BuyNameResult : RecordInteractionResult + --- @field recordsCount number The total number of records + --- @field reservedRecordsCount number The total number of reserved records + --- @field returnedName nil|ReturnedNameBuyRecordResult -- The initiator and reward details if returned name was purchased + + --- Buys a record + --- @param name string The name of the record + --- @param purchaseType string The purchase type (lease/permabuy) + --- @param years number|nil The number of years + --- @param from string The address of the sender + --- @param timestamp number The current timestamp + --- @param processId string The process id + --- @param msgId string The current message id + --- @param fundFrom string|nil The intended payment sources; one of "any", "balance", or "stake". Default "balance" + --- @param allowUnsafeProcessId boolean|nil Whether to allow unsafe processIds. Default false. 
+ --- @return BuyNameResult # The result including relevant metadata about the purchase + function arns.buyRecord( + name, + purchaseType, + years, + from, + timestamp, + processId, + msgId, + fundFrom, + allowUnsafeProcessId + ) + fundFrom = fundFrom or "balance" + allowUnsafeProcessId = allowUnsafeProcessId or false + arns.assertValidBuyRecord(name, years, purchaseType, processId, allowUnsafeProcessId) + if purchaseType == nil then + purchaseType = "lease" -- set to lease by default + end + + if not years and purchaseType == "lease" then + years = 1 -- set to 1 year by default + end + local numYears = purchaseType == "lease" and (years or 1) or 0 + + local baseRegistrationFee = demand.baseFeeForNameLength(#name) + + local tokenCostResult = arns.getTokenCost({ + currentTimestamp = timestamp, + intent = "Buy-Name", + name = name, + purchaseType = purchaseType, + years = numYears, + from = from, + }) + + local totalFee = tokenCostResult.tokenCost + + local fundingPlan = gar.getFundingPlan(from, totalFee, fundFrom) + assert(fundingPlan and fundingPlan.shortfall == 0 or false, "Insufficient balances") + + local record = arns.getRecord(name) + local isPermabuy = record ~= nil and record.type == "permabuy" + local isActiveLease = record ~= nil + and (record.endTimestamp or 0) + constants.GRACE_PERIOD_DURATION_MS > timestamp + + assert(not isPermabuy and not isActiveLease, "Name is already registered") + + assert(not arns.getReservedName(name) or arns.getReservedName(name).target == from, "Name is reserved") + + --- @type StoredRecord + local newRecord = { + processId = processId, + startTimestamp = timestamp, + type = purchaseType, + undernameLimit = constants.DEFAULT_UNDERNAME_COUNT, + purchasePrice = totalFee, + endTimestamp = purchaseType == "lease" and timestamp + constants.yearsToMs(numYears) or nil, + } + + -- Register the leased or permanently owned name + local fundingResult = gar.applyFundingPlan(fundingPlan, msgId, timestamp) + assert(fundingResult.totalFunded 
== totalFee, "Funding plan application failed") + + local rewardForProtocol = totalFee + local rewardForInitiator = 0 + local returnedName = arns.getReturnedName(name) + if returnedName then + arns.removeReturnedName(name) + rewardForInitiator = returnedName.initiator ~= ao.id and math.floor(totalFee * 0.5) or 0 + rewardForProtocol = totalFee - rewardForInitiator + balances.increaseBalance(returnedName.initiator, rewardForInitiator) + end + + -- Transfer tokens to the protocol balance + balances.increaseBalance(ao.id, rewardForProtocol) + arns.addRecord(name, newRecord) + demand.tallyNamePurchase(totalFee) + return { + record = arns.getRecord(name), + totalFee = totalFee, + baseRegistrationFee = baseRegistrationFee, + remainingBalance = balances.getBalance(from), + protocolBalance = balances.getBalance(ao.id), + recordsCount = utils.lengthOfTable(NameRegistry.records), + reservedRecordsCount = utils.lengthOfTable(NameRegistry.reserved), + df = demand.getDemandFactorInfo(), + fundingPlan = fundingPlan, + fundingResult = fundingResult, + returnedName = returnedName and { + initiator = returnedName.initiator, + rewardForProtocol = rewardForProtocol, + rewardForInitiator = rewardForInitiator, + } or nil, + } + end + + --- Adds a record to the registry + --- @param name string The name of the record + --- @param record StoredRecord The record to the name registry + function arns.addRecord(name, record) + NameRegistry.records[name] = record + + -- remove reserved name if it exists in reserved + if arns.getReservedName(name) then + NameRegistry.reserved[name] = nil + end + + if record.endTimestamp then + arns.scheduleNextRecordsPrune(record.endTimestamp) + end + end + + --- Gets paginated records + --- @param cursor string|nil The cursor to paginate from + --- @param limit number The limit of records to return + --- @param sortBy string The field to sort by + --- @param sortOrder string The order to sort by + --- @param filters table|nil Optional filter criteria + --- 
@return PaginatedTable The paginated records + function arns.getPaginatedRecords(cursor, limit, sortBy, sortOrder, filters) + --- @type Record[] + local recordsArray = {} + local cursorField = "name" -- the cursor will be the name + for name, record in pairs(arns.getRecordsUnsafe()) do + local recordCopy = utils.deepCopy(record) + --- @diagnostic disable-next-line: inject-field + recordCopy.name = name + table.insert(recordsArray, recordCopy) + end + + return utils.paginateTableWithCursor(recordsArray, cursor, cursorField, limit, sortBy, sortOrder, filters) + end + + --- Get paginated reserved names + --- @param cursor string|nil The cursor to paginate from + --- @param limit number The limit of reserved names to return + --- @param sortBy string The field to sort by + --- @param sortOrder string The order to sort by + --- @return PaginatedTable The paginated reserved names + function arns.getPaginatedReservedNames(cursor, limit, sortBy, sortOrder) + --- @type ReservedName[] + local reservedArray = {} + local cursorField = "name" -- the cursor will be the name + for name, reservedName in pairs(arns.getReservedNamesUnsafe()) do + local reservedNameCopy = utils.deepCopy(reservedName) + reservedNameCopy.name = name + table.insert(reservedArray, reservedNameCopy) + end + return utils.paginateTableWithCursor(reservedArray, cursor, cursorField, limit, sortBy, sortOrder) + end + + --- Extends the lease for a record + --- @param from string The address of the sender + --- @param name string The name of the record + --- @param years number The number of years to extend the lease + --- @param currentTimestamp number The current timestamp + --- @param msgId string The current message id + --- @param fundFrom string|nil The intended payment sources; one of "any", "balance", or "stake". 
Default "balance" + --- @return RecordInteractionResult # The response including relevant metadata about the lease extension + function arns.extendLease(from, name, years, currentTimestamp, msgId, fundFrom) + fundFrom = fundFrom or "balance" + local record = arns.getRecord(name) + assert(record, "Name is not registered") + -- throw error if invalid + arns.assertValidExtendLease(record, currentTimestamp, years) + local baseRegistrationFee = demand.baseFeeForNameLength(#name) + local tokenCostResult = arns.getTokenCost({ + currentTimestamp = currentTimestamp, + intent = "Extend-Lease", + name = name, + years = years, + from = from, + }) + local totalFee = tokenCostResult.tokenCost + + local fundingPlan = gar.getFundingPlan(from, totalFee, fundFrom) + assert(fundingPlan and fundingPlan.shortfall == 0 or false, "Insufficient balances") + local fundingResult = gar.applyFundingPlan(fundingPlan, msgId, currentTimestamp) + assert(fundingResult.totalFunded == totalFee, "Funding plan application failed") + + -- modify the record with the new end timestamp + arns.modifyRecordEndTimestamp(name, record.endTimestamp + constants.yearsToMs(years)) + + -- Transfer tokens to the protocol balance + balances.increaseBalance(ao.id, totalFee) + demand.tallyNamePurchase(totalFee) + + return { + record = arns.getRecord(name), + totalFee = totalFee, + baseRegistrationFee = baseRegistrationFee, + remainingBalance = balances.getBalance(from), + protocolBalance = balances.getBalance(ao.id), + df = demand.getDemandFactorInfo(), + fundingPlan = fundingPlan, + fundingResult = fundingResult, + } + end + + --- Calculates the extension fee for a given base fee, years, and demand factor + --- @param baseFee number The base fee for the name + --- @param years number The number of years + --- @param demandFactor number The demand factor + --- @return number The extension fee + function arns.calculateExtensionFee(baseFee, years, demandFactor) + local extensionFee = 
arns.calculateAnnualRenewalFee(baseFee, years)
		return math.floor(demandFactor * extensionFee)
	end

	--- Increases the undername limit for a record
	--- @param from string The address of the sender
	--- @param name string The name of the record
	--- @param qty number The quantity to increase the undername limit by
	--- @param currentTimestamp number The current timestamp
	--- @param msgId string The current message id
	--- @param fundFrom string|nil The intended payment sources; one of "any", "balance", or "stake". Default "balance"
	--- @return RecordInteractionResult # The result
	function arns.increaseUndernameLimit(from, name, qty, currentTimestamp, msgId, fundFrom)
		fundFrom = fundFrom or "balance"
		-- validate record can increase undernames
		local record = arns.getRecord(name)

		assert(record, "Name is not registered")

		-- price the increase via the shared costing path so discounts/validation stay consistent
		local increaseUndernameCost = arns.getTokenCost({
			currentTimestamp = currentTimestamp,
			intent = "Increase-Undername-Limit",
			name = name,
			quantity = qty,
			type = record.type,
			from = from,
		})

		assert(increaseUndernameCost.tokenCost >= 0, "Invalid undername cost")
		-- `fundingPlan and fundingPlan.shortfall == 0 or false` only normalizes nil to false for the assert
		local fundingPlan = gar.getFundingPlan(from, increaseUndernameCost.tokenCost, fundFrom)
		assert(fundingPlan and fundingPlan.shortfall == 0 or false, "Insufficient balances")
		local fundingResult = gar.applyFundingPlan(fundingPlan, msgId, currentTimestamp)
		assert(fundingResult.totalFunded == increaseUndernameCost.tokenCost, "Funding plan application failed")

		-- update the record with the new undername count
		arns.modifyRecordUndernameLimit(name, qty)

		-- Transfer tokens to the protocol balance
		balances.increaseBalance(ao.id, increaseUndernameCost.tokenCost)
		demand.tallyNamePurchase(increaseUndernameCost.tokenCost)
		return {
			record = arns.getRecord(name),
			totalFee = increaseUndernameCost.tokenCost,
			baseRegistrationFee = demand.baseFeeForNameLength(#name),
			remainingBalance = balances.getBalance(from),
			protocolBalance = balances.getBalance(ao.id),
			recordsCount = utils.lengthOfTable(NameRegistry.records),
			reservedRecordsCount = utils.lengthOfTable(NameRegistry.reserved),
			df = demand.getDemandFactorInfo(),
			fundingPlan = fundingPlan,
			fundingResult = fundingResult,
		}
	end

	--- Gets a record
	--- @param name string The name of the record
	--- @return StoredRecord|nil # A deep copy of the record or nil if it does not exist
	function arns.getRecord(name)
		return utils.deepCopy(NameRegistry.records[name])
	end

	--- Gets the process id for a registered name
	--- @param name string The name of the record
	--- @return string|nil # The record's processId, or nil if the name is not registered
	function arns.getProcessIdForRecord(name)
		local record = arns.getRecord(name)
		return record ~= nil and record.processId or nil
	end

	--- Gets the active ARNS names between two timestamps
	--- NOTE(review): activity is only checked at the two endpoints, not in between — a record
	--- that lapses and is re-registered within the window would still be listed; confirm intent.
	--- @param startTimestamp number The start timestamp
	--- @param endTimestamp number The end timestamp
	--- @return table The active ARNS names between the two timestamps
	function arns.getActiveArNSNamesBetweenTimestamps(startTimestamp, endTimestamp)
		local records = arns.getRecordsUnsafe()
		local activeNames = {}
		for name, record in pairs(records) do
			if arns.recordIsActive(record, startTimestamp) and arns.recordIsActive(record, endTimestamp) then
				table.insert(activeNames, name)
			end
		end
		return activeNames
	end

	--- Gets the total number of ARNS names and their status before a specific timestamp
	--- @param timestamp number The timestamp to check
	--- @return table The active and grace-period name counts at the timestamp
	local function getRecordsStatsAtTimestamp(timestamp)
		local totalActiveNames = 0
		local totalGracePeriodNames = 0
		local records = arns.getRecordsUnsafe()

		for _, record in pairs(records) do
			-- permabuys are always active; leases are classified by their endTimestamp
			if record.type == "permabuy" then
				totalActiveNames = totalActiveNames + 1
			elseif record.type == "lease" and record.endTimestamp then
				if arns.recordIsActive(record, timestamp) then
					totalActiveNames = totalActiveNames + 1
				elseif arns.recordInGracePeriod(record, timestamp) then
					totalGracePeriodNames = totalGracePeriodNames + 1
				end
			end
		end

		return {
			totalActiveNames = totalActiveNames,
			totalGracePeriodNames = totalGracePeriodNames,
		}
	end

	--- Gets the total number of reserved names that are active before a specific timestamp
	--- @param timestamp number The timestamp to check
	--- @return number # The total number of reserved names before the timestamp
	local function getReservedNamesAtTimestamp(timestamp)
		local reservedNames = arns.getReservedNamesUnsafe()
		local totalReservedNames = 0
		for _, reservedName in pairs(reservedNames) do
			-- a reservation with no endTimestamp never expires
			if not reservedName.endTimestamp or reservedName.endTimestamp >= timestamp then
				totalReservedNames = totalReservedNames + 1
			end
		end
		return totalReservedNames
	end

	--- Gets the total number of returned names that are active before a specific timestamp
	--- @param timestamp number The timestamp to check
	--- @return number # The total number of returned names before the timestamp
	local function getReturnedNamesAtTimestamp(timestamp)
		local returnedNames = arns.getReturnedNamesUnsafe()
		local totalReturnedNames = 0

		for _, returnedName in pairs(returnedNames) do
			if returnedName.startTimestamp + constants.RETURNED_NAME_DURATION_MS >= timestamp then
				totalReturnedNames = totalReturnedNames + 1
			end
		end
		return totalReturnedNames
	end

	--- Gets the ARNS stats at a specific timestamp
	--- @param timestamp number The timestamp to check
	--- @return ArNSStats # The ARNS stats at the timestamp
	function arns.getArNSStatsAtTimestamp(timestamp)
		local totalNames = getRecordsStatsAtTimestamp(timestamp)
		local totalReservedNames = getReservedNamesAtTimestamp(timestamp)
		local totalReturnedNames = getReturnedNamesAtTimestamp(timestamp)

		return {
			totalActiveNames = totalNames.totalActiveNames,
			totalGracePeriodNames = totalNames.totalGracePeriodNames,
			totalReservedNames = totalReservedNames,
			totalReturnedNames = totalReturnedNames,
		}
	end

	--- Gets deep copies of all records
	--- @return table # A deep copy of the records table
	function arns.getRecords()
		local records = utils.deepCopy(NameRegistry.records)
		return records or {}
	end

	--- Gets all records
	--- @return table # The actual records table (do not mutate)
	function arns.getRecordsUnsafe()
		return NameRegistry and NameRegistry.records or {}
	end

	--- Gets copies of all reserved names
	--- @return table # A deep copy of the reserved names table
	function arns.getReservedNames()
		local reserved = utils.deepCopy(NameRegistry.reserved)
		return reserved or {}
	end

	--- Gets all reserved names
	--- @return table # The actual reserved names table (do not mutate)
	function arns.getReservedNamesUnsafe()
		return NameRegistry and NameRegistry.reserved or {}
	end

	--- Gets a reserved name
	--- @param name string The name of the reserved record
	--- @return table|nil # A deep copy of the reserved name or nil if it does not exist
	function arns.getReservedName(name)
		return utils.deepCopy(NameRegistry.reserved[name])
	end

	--- Modifies the undername limit for a record
	--- @param name string The name of the record
	--- @param qty number The quantity to increase the undername limit by
	--- @return StoredRecord|nil # The updated record
	function arns.modifyRecordUndernameLimit(name, qty)
		local record = arns.getRecord(name)
		assert(record, "Name is not registered")
		NameRegistry.records[name].undernameLimit = record.undernameLimit + qty
		return arns.getRecord(name)
	end

	--- Modifies the process id for a record
	--- @param name string The name of the record
	--- @param processId string The new process id
	--- @return StoredRecord|nil # The updated record
	function arns.modifyProcessId(name, processId)
		local record = arns.getRecord(name)
		assert(record, "Name is not registered")
		NameRegistry.records[name].processId = processId
		return arns.getRecord(name)
	end

	--- Modifies the end timestamp for a record
	--- @param name string The name of the record
	--- @param newEndTimestamp number The new end timestamp
	--- @return StoredRecord|nil # The updated record
	function arns.modifyRecordEndTimestamp(name, newEndTimestamp)
		local record = arns.getRecord(name)
		assert(record, "Name is not registered")
		-- a lease can never extend past MAX_LEASE_LENGTH_YEARS from its startTimestamp
		local maxLeaseLength = constants.MAX_LEASE_LENGTH_YEARS * constants.yearsToMs(1)
		local maxEndTimestamp = record.startTimestamp + maxLeaseLength
		assert(newEndTimestamp <= maxEndTimestamp, "Cannot extend lease beyond 5 years")
		NameRegistry.records[name].endTimestamp = newEndTimestamp
		-- Guard against the invariant case where record may not expire sooner
		arns.scheduleNextRecordsPrune(newEndTimestamp)
		return arns.getRecord(name)
	end

	---Calculates the lease fee for a given base fee, years, and demand factor
	--- @param baseFee number The base fee for the name
	--- @param years number|nil The number of years
	--- @param demandFactor number The demand factor
	--- @return number leaseFee - the lease fee
	function arns.calculateLeaseFee(baseFee, years, demandFactor)
		assert(years, "Years is required for lease")
		local annualRegistrationFee = arns.calculateAnnualRenewalFee(baseFee, years)
		local totalLeaseCost = baseFee + annualRegistrationFee
		return math.floor(demandFactor * totalLeaseCost)
	end

	---Calculates the annual renewal fee for a given base fee and years
	--- @param baseFee number The base fee for the name
	--- @param years number The number of years
	--- @return number annualRenewalFee - the annual renewal fee
	function arns.calculateAnnualRenewalFee(baseFee, years)
		local totalAnnualRenewalCost = baseFee * constants.ANNUAL_PERCENTAGE_FEE * years
		return math.floor(totalAnnualRenewalCost)
	end

	---Calculates the permabuy fee for a given base fee and demand factor
	--- @param baseFee number The base fee for the name
	--- @param demandFactor number The demand factor
	--- @return number permabuyFee - the permabuy fee
	function arns.calculatePermabuyFee(baseFee, demandFactor)
		-- a permabuy is priced like a lease of PERMABUY_LEASE_FEE_LENGTH_YEARS
		local permabuyPrice = baseFee
			+ arns.calculateAnnualRenewalFee(baseFee, constants.PERMABUY_LEASE_FEE_LENGTH_YEARS)
		return math.floor(demandFactor * permabuyPrice)
	end

	---Calculates the registration fee for a given purchase type, base fee, years, and demand factor
	--- @param purchaseType string The purchase type (lease/permabuy)
	--- @param baseFee number The base fee for the name
	--- @param years number|nil The number of years, may be empty for permabuy
	--- @param demandFactor number The demand factor
	--- @return number registrationFee - the registration fee
	function arns.calculateRegistrationFee(purchaseType, baseFee, years, demandFactor)
		assert(purchaseType == "lease" or purchaseType == "permabuy", "Invalid purchase type")
		local registrationFee = purchaseType == "lease" and arns.calculateLeaseFee(baseFee, years, demandFactor)
			or arns.calculatePermabuyFee(baseFee, demandFactor)

		return registrationFee
	end

	---Calculates the undername cost for a given base fee, increase quantity, registration type, years, and demand factor
	--- @param baseFee number The base fee for the name
	--- @param increaseQty number The increase quantity
	--- @param registrationType string The registration type (lease/permabuy)
	--- @param demandFactor number The demand factor
	--- @return number undernameCost - the undername cost
	function arns.calculateUndernameCost(baseFee, increaseQty, registrationType, demandFactor)
		assert(registrationType == "lease" or registrationType == "permabuy", "Invalid registration type")
		local undernamePercentageFee = registrationType == "lease" and constants.UNDERNAME_LEASE_FEE_PERCENTAGE
			or constants.UNDERNAME_PERMABUY_FEE_PERCENTAGE
		local totalFeeForQty = baseFee * undernamePercentageFee * increaseQty
		return math.floor(demandFactor * totalFeeForQty)
	end

	--- Calculates the number of years between two timestamps
	--- @param startTimestamp number The start timestamp
	--- @param endTimestamp number The end timestamp
	--- @return number yearsBetweenTimestamps - the (fractional) number of years between the two timestamps
	function arns.calculateYearsBetweenTimestamps(startTimestamp, endTimestamp)
		local yearsRemainingFloat = (endTimestamp - startTimestamp) / constants.yearsToMs(1)
		return yearsRemainingFloat
	end

	--- Asserts that a name is a valid ARNS name
	--- @param name string The name to check
	function arns.assertValidArNSName(name)
		assert(name and type(name) == "string", "Name is required and must be a string.")
		assert(
			#name >= constants.MIN_NAME_LENGTH and #name <= constants.MAX_BASE_NAME_LENGTH,
			"Name length is invalid. Must be between "
				.. constants.MIN_NAME_LENGTH
				.. " and "
				.. constants.MAX_BASE_NAME_LENGTH
				.. " characters."
		)
		-- single-character names have their own, stricter pattern
		if #name == 1 then
			assert(
				name:match(constants.ARNS_NAME_SINGLE_CHAR_REGEX),
				"Single-character name pattern for "
					.. name
					.. " is invalid. Must match "
					.. constants.ARNS_NAME_SINGLE_CHAR_REGEX
			)
		else
			assert(
				name:match(constants.ARNS_NAME_MULTICHARACTER_REGEX),
				"Name pattern for " .. name .. " is invalid. Must match " .. constants.ARNS_NAME_MULTICHARACTER_REGEX
			)
		end
	end

	--- Asserts that a buy record is valid
	--- @param name string The name of the record
	--- @param years number|nil The number of years to check
	--- @param purchaseType string|nil The purchase type to check
	--- @param processId string|nil The processId of the record
	--- @param allowUnsafeProcessId boolean|nil Whether to allow unsafe processIds. Default false.
	function arns.assertValidBuyRecord(name, years, purchaseType, processId, allowUnsafeProcessId)
		allowUnsafeProcessId = allowUnsafeProcessId or false
		arns.assertValidArNSName(name)

		-- assert purchase type if present is lease or permabuy
		assert(
			purchaseType == nil or purchaseType == "lease" or purchaseType == "permabuy",
			"Purchase-Type is invalid."
		)

		if purchaseType == "lease" or purchaseType == nil then
			-- only check on leases (nil is set to lease)
			-- If 'years' is present, validate it as an integer between 1 and 5
			assert(
				years == nil or (type(years) == "number" and years % 1 == 0 and years >= 1 and years <= 5),
				"Years is invalid. Must be an integer between 1 and 5"
			)
		end

		-- assert processId is valid pattern
		assert(type(processId) == "string", "Process id is required and must be a string.")
		assert(utils.isValidAddress(processId, allowUnsafeProcessId), "Process Id must be a valid address.")
	end

	--- Asserts that a record is valid for extending the lease
	--- @param record StoredRecord The record to check
	--- @param currentTimestamp number The current timestamp
	--- @param years number The number of years to check
	function arns.assertValidExtendLease(record, currentTimestamp, years)
		assert(record.type ~= "permabuy", "Name is permanently owned and cannot be extended")
		assert(not arns.recordExpired(record, currentTimestamp), "Name is expired")

		local maxAllowedYears = arns.getMaxAllowedYearsExtensionForRecord(record, currentTimestamp)
		assert(years <= maxAllowedYears, "Cannot extend lease beyond 5 years")
	end

	--- Calculates the maximum allowed years extension for a record
	--- @param record StoredRecord The record to check
	--- @param currentTimestamp number The current timestamp
	--- @return number The maximum allowed years extension for the record
	function arns.getMaxAllowedYearsExtensionForRecord(record, currentTimestamp)
		if not record.endTimestamp then
			return 0
		end

		-- while in the grace period the full maximum extension is allowed
		if
			currentTimestamp > record.endTimestamp
			and currentTimestamp < record.endTimestamp + constants.GRACE_PERIOD_DURATION_MS
		then
			return constants.MAX_LEASE_LENGTH_YEARS
		end

		-- TODO: should we put this as the ceiling? or should we allow people to extend as soon as it is purchased
		local yearsRemainingOnLease = math.ceil((record.endTimestamp - currentTimestamp) / constants.yearsToMs(1))

		-- a number between 0 and 5 (MAX_LEASE_LENGTH_YEARS)
		return constants.MAX_LEASE_LENGTH_YEARS - yearsRemainingOnLease
	end

	--- @class RegistrationFee
	--- @field lease table Lease fees by year
	--- @field permabuy number Cost for permanent purchase

	--- Gets the registration fees for all name lengths and years
	--- @return RegistrationFee registrationFees - a table containing registration fees for each name length, with the following structure:
	--- - [nameLength]: table The fees for names of this length
	---   - lease: table Lease fees by year
	---     - ["1"]: number Cost for 1 year lease
	---     - ["2"]: number Cost for 2 year lease
	---     - ["3"]: number Cost for 3 year lease
	---     - ["4"]: number Cost for 4 year lease
	---     - ["5"]: number Cost for 5 year lease
	---   - permabuy: number Cost for permanent purchase
	function arns.getRegistrationFees()
		local fees = {}
		local demandFactor = demand.getDemandFactor()

		for nameLength, baseFee in pairs(demand.getFees()) do
			local feesForNameLength = {
				lease = {},
				permabuy = 0,
			}
			for years = 1, constants.MAX_LEASE_LENGTH_YEARS do
				feesForNameLength.lease[tostring(years)] = arns.calculateLeaseFee(baseFee, years, demandFactor)
			end
			feesForNameLength.permabuy = arns.calculatePermabuyFee(baseFee, demandFactor)
			fees[tostring(nameLength)] = feesForNameLength
		end
		return fees
	end

	---@class Discount
	---@field name string The name of the discount
	---@field discountTotal number The discounted cost
	---@field multiplier number The multiplier for the discount

	---@class TokenCostResult
	---@field tokenCost number The token cost in mARIO of the intended action
	---@field discounts table|nil The discounts applied to the token cost
	---@field returnedNameDetails table|nil The details of any returned name in the token cost result

	--- @class IntendedAction
	--- @field purchaseType 'lease' | 'permabuy'|nil The type of purchase (lease/permabuy)
	--- @field years number|nil The number of years for lease
	--- @field quantity number|nil The quantity for increasing undername limit
	--- @field name string The name of the record
	--- @field intent string The intended action type (Buy-Name/Extend-Lease/Increase-Undername-Limit/Upgrade-Name/Primary-Name-Request)
	--- @field currentTimestamp number The current timestamp
	--- @field from string|nil The target address of the intended action
	--- @field record StoredRecord|nil The record to perform the intended action on

	--- @param intendedAction IntendedAction The intended action to get token cost for
	--- @return TokenCostResult tokenCostResult The token cost result of the intended action
	function arns.getTokenCost(intendedAction)
		local intent = intendedAction.intent
		local tokenCost = 0
		local purchaseType = intendedAction.purchaseType or "lease"
		local years = tonumber(intendedAction.years)
		-- We get the base name in case its a primary name request - which, because of undername primary names, can be longer than the longest arns base fee
		local baseName = utils.baseNameForName(intendedAction.name)
		local baseFee = demand.baseFeeForNameLength(#baseName)
		local qty = tonumber(intendedAction.quantity)
		local record = intendedAction.record or arns.getRecord(baseName)
		local currentTimestamp = tonumber(intendedAction.currentTimestamp)
		local returnedNameDetails = nil

		assert(type(intent) == "string", "Intent is required and must be a string.")
		assert(type(baseName) == "string", "Name is required and must be a string.")
		if intent == "Buy-Name" then
			-- stub the process id as it is not required for this intent
			local processId = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
			arns.assertValidBuyRecord(baseName, years, purchaseType, processId, false)
			tokenCost = arns.calculateRegistrationFee(purchaseType, baseFee, years, demand.getDemandFactor())
			-- buying a returned name carries a premium that decays over the return period
			local returnedName = arns.getReturnedNameUnsafe(baseName)
			if returnedName then
				local premiumMultiplier =
					arns.getReturnedNamePremiumMultiplier(returnedName.startTimestamp, currentTimestamp)
				returnedNameDetails = {
					name = baseName,
					initiator = returnedName.initiator,
					startTimestamp = returnedName.startTimestamp,
					endTimestamp = returnedName.startTimestamp + constants.RETURNED_NAME_DURATION_MS,
					premiumMultiplier = premiumMultiplier,
					basePrice = tokenCost,
				}
				tokenCost = math.floor(tokenCost * premiumMultiplier)
			end
		elseif intent == "Extend-Lease" then
			assert(record, "Name is not registered")
			assert(currentTimestamp, "Timestamp is required")
			assert(years, "Years is required")
			arns.assertValidExtendLease(record, currentTimestamp, years)
			tokenCost = arns.calculateExtensionFee(baseFee, years, demand.getDemandFactor())
		elseif intent == "Increase-Undername-Limit" then
			assert(record, "Name is not registered")
			assert(currentTimestamp, "Timestamp is required")
			assert(qty, "Quantity is required for increasing undername limit")
			arns.assertValidIncreaseUndername(record, qty, currentTimestamp)
			tokenCost = arns.calculateUndernameCost(baseFee, qty, record.type, demand.getDemandFactor())
		elseif intent == "Upgrade-Name" then
			assert(record, "Name is not registered")
			assert(currentTimestamp, "Timestamp is required")
			arns.assertValidUpgradeName(record, currentTimestamp)
			tokenCost = arns.calculatePermabuyFee(baseFee, demand.getDemandFactor())
		elseif intent == "Primary-Name-Request" then
			-- primary name requests cost the same as a 1 undername
			assert(record, "Name is not registered")
			assert(currentTimestamp, "Timestamp is required")
			local primaryRequestBaseFee =
				demand.baseFeeForNameLength(constants.PRIMARY_NAME_REQUEST_DEFAULT_NAME_LENGTH)
			tokenCost = arns.calculateUndernameCost(primaryRequestBaseFee, 1, record.type, demand.getDemandFactor())
		else
			error("Invalid intent: " .. intent)
		end

		local discounts = {}

		-- if the address is eligible for the ArNS discount, apply the discount
		if gar.isEligibleForArNSDiscount(intendedAction.from) then
			local discountTotal = math.floor(tokenCost * constants.GATEWAY_OPERATOR_ARNS_DISCOUNT_PERCENTAGE)
			local discount = {
				name = constants.GATEWAY_OPERATOR_ARNS_DISCOUNT_NAME,
				discountTotal = discountTotal,
				multiplier = constants.GATEWAY_OPERATOR_ARNS_DISCOUNT_PERCENTAGE,
			}
			table.insert(discounts, discount)
			tokenCost = tokenCost - discountTotal
		end

		-- if token Cost is less than 0, throw an error
		assert(tokenCost >= 0, "Invalid token cost for " .. intendedAction.intent)

		return {
			tokenCost = tokenCost,
			discounts = discounts,
			returnedNameDetails = returnedNameDetails,
		}
	end

	---@class TokenCostAndFundingPlan
	---@field tokenCost number The token cost in mARIO of the intended action
	---@field discounts table|nil The discounts applied to the token cost
	---@field fundingPlan table|nil The funding plan for the intended action
	---@field returnedNameDetails table|nil The details of any returned name in the token cost result

	--- Gets the token cost and funding plan for the given intent
	--- @param intent string The intent to get the cost and funding plan for
	--- @param name string The name to get the cost and funding plan for
	--- @param years number The number of years to get the cost and funding plan for
	--- @param quantity number The quantity to get the cost and funding plan for
	--- @param purchaseType string The purchase type to get the cost and funding plan for
	--- @param currentTimestamp number The current timestamp to get the cost and funding plan for
	--- @param from string The from address to get the cost and funding plan for
	--- @param fundFrom string The fund from address to get the cost and funding plan for
	--- @return TokenCostAndFundingPlan tokenCostAndFundingPlan The token cost and funding plan for the given intent
	function arns.getTokenCostAndFundingPlanForIntent(
		intent,
		name,
		years,
		quantity,
		purchaseType,
		currentTimestamp,
		from,
		fundFrom
	)
		local tokenCostResult = arns.getTokenCost({
			intent = intent,
			name = name,
			years = years,
			quantity = quantity,
			purchaseType = purchaseType,
			currentTimestamp = currentTimestamp,
			from = from,
		})
		-- funding plan is only computed when a fundFrom source is supplied
		local fundingPlan = fundFrom and gar.getFundingPlan(from, tokenCostResult.tokenCost, fundFrom)
		return {
			tokenCost = tokenCostResult.tokenCost,
			fundingPlan = fundingPlan,
			discounts = tokenCostResult.discounts,
			returnedNameDetails = tokenCostResult.returnedNameDetails,
		}
	end

	--- Asserts that a name is valid for upgrading
	--- @param record StoredRecord The record to check
	--- @param currentTimestamp number The current timestamp
	function arns.assertValidUpgradeName(record, currentTimestamp)
		assert(record.type ~= "permabuy", "Name is permanently owned")
		assert(
			arns.recordIsActive(record, currentTimestamp) or arns.recordInGracePeriod(record, currentTimestamp),
			"Name is expired"
		)
	end

	--- Upgrades a leased record to permanently owned
	--- @param from string The address of the sender
	--- @param name string The name of the record
	--- @param currentTimestamp number The current timestamp
	--- @param msgId string The current message id
	--- @param fundFrom string|nil The intended payment sources; one of "any", "balance", or "stake". Default "balance"
	--- @return RecordInteractionResult # the upgraded record with name and record fields
	function arns.upgradeRecord(from, name, currentTimestamp, msgId, fundFrom)
		fundFrom = fundFrom or "balance"
		local record = arns.getRecord(name)
		assert(record, "Name is not registered")
		assert(currentTimestamp, "Timestamp is required")
		arns.assertValidUpgradeName(record, currentTimestamp)

		local baseFee = demand.baseFeeForNameLength(#name)
		local tokenCostResult = arns.getTokenCost({
			currentTimestamp = currentTimestamp,
			intent = "Upgrade-Name",
			name = name,
			from = from,
		})
		local totalFee = tokenCostResult.tokenCost

		local fundingPlan = gar.getFundingPlan(from, totalFee, fundFrom)
		assert(fundingPlan and fundingPlan.shortfall == 0 or false, "Insufficient balances")
		local fundingResult = gar.applyFundingPlan(fundingPlan, msgId, currentTimestamp)
		assert(fundingResult.totalFunded == totalFee, "Funding plan application failed")
		balances.increaseBalance(ao.id, totalFee)
		demand.tallyNamePurchase(totalFee)

		record.endTimestamp = nil
		-- figuring out the next prune timestamp would require a full scan of all records anyway so don't reschedule
		record.type = "permabuy"
		record.purchasePrice = totalFee

		NameRegistry.records[name] = record
		return {
			name = name,
			record = record,
			totalFee = totalFee,
			baseRegistrationFee = baseFee,
			remainingBalance = balances.getBalance(from),
			protocolBalance = balances.getBalance(ao.id),
			df = demand.getDemandFactorInfo(),
			fundingPlan = fundingPlan,
			fundingResult = fundingResult,
		}
	end

	--- Checks if a record is in the grace period
	--- @param record StoredRecord The record to check
	--- @param timestamp number The timestamp to check
	--- @return boolean isInGracePeriod True if the record is in the grace period, false otherwise (active or expired)
	function arns.recordInGracePeriod(record, timestamp)
		return record.endTimestamp
			and record.endTimestamp < timestamp
			and record.endTimestamp + constants.GRACE_PERIOD_DURATION_MS > timestamp
			or false
	end

	--- Checks if a record is expired
	--- @param record StoredRecord The record to check
	--- @param timestamp number The timestamp to check
	--- @return boolean isExpired True if the record is expired, false otherwise (active or in grace period)
	function arns.recordExpired(record, timestamp)
		if record.type == "permabuy" then
			return false
		end
		local isActive = arns.recordIsActive(record, timestamp)
		local inGracePeriod = arns.recordInGracePeriod(record, timestamp)
		local expired = not isActive and not inGracePeriod
		return expired
	end

	--- Checks if a record is active
	--- @param record StoredRecord The record to check
	--- @param timestamp number The timestamp to check
	--- @return boolean isActive True if the record is active, false otherwise (expired or in grace period)
	function arns.recordIsActive(record, timestamp)
		if record.type == "permabuy" then
			return true
		end

		-- to avoid pruning on forked state for records that start in the future, only check the end timestamp against the current timestamp
		return record.endTimestamp and record.endTimestamp >= timestamp or false
	end

	--- Asserts that a record is valid for increasing the undername limit
	--- @param record StoredRecord The record to check
	--- @param qty number The quantity to check
	--- @param currentTimestamp number The current timestamp
	function arns.assertValidIncreaseUndername(record, qty, currentTimestamp)
		assert(arns.recordIsActive(record, currentTimestamp), "Name must be active to increase undername limit")
		assert(qty > 0 and utils.isInteger(qty), "Qty is invalid")
	end

	--- Adds name to the recently returned name list
	--- @param name string The name of the returned name
	--- @param timestamp number The timestamp of the release
	--- @param initiator string The address of the initiator
	--- @return ReturnedName
	function arns.createReturnedName(name, timestamp, initiator)
		assert(
			not arns.getRecord(name),
			"Name is registered. Returned names can only be created for unregistered names."
		)
		assert(
			not arns.getReservedName(name),
			"Name is reserved. Returned names can only be created for unregistered names."
		)
		assert(not arns.getReturnedNameUnsafe(name), "Returned name already exists")
		local returnedName = {
			name = name,
			startTimestamp = timestamp,
			initiator = initiator,
		}
		NameRegistry.returned[name] = returnedName
		-- ensure the returned name is pruned once its return period elapses
		arns.scheduleNextReturnedNamesPrune(timestamp + constants.RETURNED_NAME_DURATION_MS)
		return returnedName
	end

	--- Gets a returned name
	--- @param name string The name of the returned name
	--- @return ReturnedName|nil
	function arns.getReturnedNameUnsafe(name)
		return NameRegistry.returned[name]
	end

	--- Gets a returned name as a deep copy
	--- @param name string The name of the returned name
	--- @return ReturnedName|nil
	function arns.getReturnedName(name)
		return utils.deepCopy(arns.getReturnedNameUnsafe(name))
	end

	--- Gets all returned names
	--- @return table returnedNames - the returned names (the actual table, do not mutate)
	function arns.getReturnedNamesUnsafe()
		return NameRegistry.returned or {}
	end

	--- Computes the premium multiplier for buying a returned name; decays linearly
	--- from RETURNED_NAME_MAX_MULTIPLIER (fresh) towards 0 over the return period
	--- @param startTimestamp number The start of the return period
	--- @param currentTimestamp number The current timestamp (must lie within the return period)
	--- @return number # The premium multiplier
	function arns.getReturnedNamePremiumMultiplier(startTimestamp, currentTimestamp)
		assert(currentTimestamp >= startTimestamp, "Current timestamp must be after the start timestamp")
		assert(
			currentTimestamp < startTimestamp + constants.RETURNED_NAME_DURATION_MS,
			"Current timestamp is after the returned name period"
		)
		local timestampDiff = currentTimestamp - startTimestamp
		-- The percentage of the period that has passed e.g: 0.5 if half the period has passed
		local percentageOfReturnedNamePeriodPassed = timestampDiff / constants.RETURNED_NAME_DURATION_MS
		-- Take the inverse so that a fresh returned name has the full multiplier, and a name almost expired has a multiplier close to base price
		local pctOfReturnPeriodRemaining = 1 - percentageOfReturnedNamePeriodPassed

		return constants.RETURNED_NAME_MAX_MULTIPLIER * pctOfReturnPeriodRemaining
	end

	--- Removes a returned name by name
	--- @param name string The name of the returnedName
	--- @return ReturnedName|nil returnedName - the returnedName instance
	function arns.removeReturnedName(name)
		local returnedName = arns.getReturnedName(name)
		NameRegistry.returned[name] = nil
		return returnedName
	end

	--- Removes a record by name
	--- @param name string The name of the record
	--- @return Record|nil record - the record instance
	function arns.removeRecord(name)
		local record = NameRegistry.records[name]
		NameRegistry.records[name] = nil
		return record
	end

	--- Removes a reserved name by name
	--- @param name string The name of the reserved name
	--- @return ReservedName|nil reservedName - the reserved name instance
	function arns.removeReservedName(name)
		local reserved = NameRegistry.reserved[name]
		NameRegistry.reserved[name] = nil
		return reserved
	end

	--- Prunes records that have expired
	--- Early-exits unless the NextRecordsPruneTimestamp global says a prune is due.
	--- @param currentTimestamp number The current timestamp
	--- @param lastGracePeriodEntryEndTimestamp number|nil The end timestamp of the last known record to have entered its grace period
	--- @return table prunedRecords - the pruned records
	--- @return table recordsInGracePeriod - the records that have entered their grace period
	function arns.pruneRecords(currentTimestamp, lastGracePeriodEntryEndTimestamp)
		lastGracePeriodEntryEndTimestamp = lastGracePeriodEntryEndTimestamp or 0
		local prunedRecords = {}
		local newGracePeriodRecords = {}
		if not NextRecordsPruneTimestamp or NextRecordsPruneTimestamp > currentTimestamp then
			return prunedRecords, newGracePeriodRecords
		end

		-- identify any records that are leases and that have expired, account for a two week grace period in seconds
		NextRecordsPruneTimestamp = nil

		-- note: use unsafe to avoid copying all the records, but be careful not to modify the records directly here
		for name, record in pairs(arns.getRecordsUnsafe()) do
			if arns.recordExpired(record, currentTimestamp) then
				print("Pruning record " .. name .. " because it has expired")
				prunedRecords[name] = arns.removeRecord(name)
			elseif arns.recordInGracePeriod(record, currentTimestamp) then
				-- only report records that entered the grace period since the last reported entry
				if record.endTimestamp > lastGracePeriodEntryEndTimestamp then
					print(
						"Adding record "
							.. name
							.. " to new grace period records because it has entered its grace period"
					)
					newGracePeriodRecords[name] = record
				end
				-- Make sure we prune when the grace period is over
				arns.scheduleNextRecordsPrune(record.endTimestamp + constants.GRACE_PERIOD_DURATION_MS)
			elseif record.endTimestamp then
				arns.scheduleNextRecordsPrune(record.endTimestamp)
			end
		end
		return prunedRecords, newGracePeriodRecords
	end

	--- Prunes returned names that have expired
	--- @param currentTimestamp number The current timestamp
	--- @return ReturnedName[] prunedReturnedNames - the pruned returned names
	function arns.pruneReturnedNames(currentTimestamp)
		local prunedReturnedNames = {}
		if not NextReturnedNamesPruneTimestamp or currentTimestamp < NextReturnedNamesPruneTimestamp then
			-- No known returned names to prune
			return prunedReturnedNames
		end

		-- reset the next prune timestamp, below will populate it with the next prune timestamp minimum
		NextReturnedNamesPruneTimestamp = nil

		-- note: use unsafe to avoid copying all the returned names, but be careful not to modify the returned names directly here
		for name, returnedName in pairs(arns.getReturnedNamesUnsafe()) do
			local endTimestamp = returnedName.startTimestamp + constants.RETURNED_NAME_DURATION_MS
			if currentTimestamp >= endTimestamp then
				prunedReturnedNames[name] = arns.removeReturnedName(name)
			else
				arns.scheduleNextReturnedNamesPrune(endTimestamp)
			end
		end
		return prunedReturnedNames
	end

	--- Prunes reserved names that have expired
	--- @param currentTimestamp number The current timestamp
	--- @return ReservedName[] prunedReservedNames - the pruned reserved names
	function arns.pruneReservedNames(currentTimestamp)
		local prunedReserved = {}

		-- note: use unsafe to avoid copying all the reserved names, but be careful not to modify the reserved names directly here
		for name, details in pairs(arns.getReservedNamesUnsafe()) do
			if details.endTimestamp and details.endTimestamp <= currentTimestamp then
				prunedReserved[name] = arns.removeReservedName(name)
			end
		end
		return prunedReserved
	end

	--- Asserts that a name can be reassigned
	--- @param record StoredRecord | nil The record to check
	--- @param currentTimestamp number The current timestamp
	--- @param from string The address of the sender
	--- @param newProcessId string The new process id
	--- @param allowUnsafeProcessId boolean|nil Whether to allow unsafe processIds. Default false.
	function arns.assertValidReassignName(record, currentTimestamp, from, newProcessId, allowUnsafeProcessId)
		allowUnsafeProcessId = allowUnsafeProcessId or false
		assert(record, "Name is not registered")
		assert(currentTimestamp, "Timestamp is required")
		assert(utils.isValidAddress(newProcessId, allowUnsafeProcessId), "Invalid Process-Id")
		-- only the currently-assigned ANT process may reassign the name
		assert(record.processId == from, "Not authorized to reassign this name")

		if record.endTimestamp then
			assert(
				not arns.recordInGracePeriod(record, currentTimestamp),
				"Name must be extended before it can be reassigned"
			)
			assert(not arns.recordExpired(record, currentTimestamp), "Name is expired")
		end

		return true
	end

	--- Reassigns a name
	--- @param name string The name of the record
	--- @param from string The address of the sender
	--- @param currentTimestamp number The current timestamp
	--- @param newProcessId string The new process id
	--- @param allowUnsafeProcessId boolean|nil Whether to allow unsafe processIds. Default false.
+ --- @return StoredRecord|nil updatedRecord - the updated record + function arns.reassignName(name, from, currentTimestamp, newProcessId, allowUnsafeProcessId) + allowUnsafeProcessId = allowUnsafeProcessId or false + local record = arns.getRecord(name) + arns.assertValidReassignName(record, currentTimestamp, from, newProcessId, allowUnsafeProcessId) + local updatedRecord = arns.modifyProcessId(name, newProcessId) + return updatedRecord + end + + --- @param timestamp Timestamp + function arns.scheduleNextRecordsPrune(timestamp) + NextRecordsPruneTimestamp = math.min(NextRecordsPruneTimestamp or timestamp, timestamp) + end + + --- @param timestamp Timestamp + function arns.scheduleNextReturnedNamesPrune(timestamp) + NextReturnedNamesPruneTimestamp = math.min(NextReturnedNamesPruneTimestamp or timestamp, timestamp) + end + + function arns.nextRecordsPruneTimestamp() + return NextRecordsPruneTimestamp + end + + function arns.nextReturnedNamesPruneTimestamp() + return NextReturnedNamesPruneTimestamp + end + + return arns +end + +_G.package.loaded[".src.arns"] = _loaded_mod_src_arns() + +-- module: ".src.epochs" +local function _loaded_mod_src_epochs() + local epochs = {} + local gar = require(".src.gar") + local utils = require(".src.utils") + local balances = require(".src.balances") + local arns = require(".src.arns") + + -- note: crypto is provided by the module so we reference it relative to process.lua in the module + local crypto = require(".crypto.init") + + --- @alias ObserverAddress string + --- @alias DelegateAddress string + --- @alias TransactionId string + + --- @class PrescribedEpoch + --- @field hashchain string The hashchain of the epoch + --- @field epochIndex number The index of the epoch + --- @field startTimestamp number The start timestamp of the epoch + --- @field endTimestamp number The end timestamp of the epoch + --- @field startHeight number The start height of the epoch + --- @field arnsStats ArNSStats The ArNS stats for the epoch + --- 
@field prescribedObservers table The prescribed observers of the epoch + --- @field prescribedNames string[] The prescribed names of the epoch + --- @field distributions PrescribedEpochDistribution The distributions of the epoch + --- @field observations Observations The observations of the epoch + + --- @class ArNSStats # The ArNS stats for an epoch + --- @field totalActiveNames number The total active ArNS names + --- @field totalGracePeriodNames number The total grace period ArNS names + --- @field totalReservedNames number The total reserved ArNS names + --- @field totalReturnedNames number The total returned ArNS names + + --- @class DistributedEpoch : PrescribedEpoch + --- @field distributions DistributedEpochDistribution The rewards of the epoch + + --- @class EpochSettings + --- @field prescribedNameCount number The number of prescribed names + --- @field rewardPercentage number The reward percentage + --- @field maxObservers number The maximum number of observers + --- @field epochZeroStartTimestamp number The start timestamp of epoch zero + --- @field durationMs number The duration of an epoch in milliseconds + + --- @class WeightedGateway + --- @field gatewayAddress string The gateway address + --- @field observerAddress string The observer address + --- @field stakeWeight number The stake weight + --- @field tenureWeight number The tenure weight + --- @field gatewayPerformanceRatio number The gateway reward ratio weight + --- @field observerPerformanceRatio number The observer reward ratio weight + --- @field compositeWeight number The composite weight + --- @field normalizedCompositeWeight number The normalized composite weight + + --- @class Observations + --- @field failureSummaries table The failure summaries + --- @field reports Reports The reports for the epoch (indexed by observer address) + + --- @alias Reports table + + --- @class GatewayRewards + --- @field operatorReward number The total operator reward eligible + --- @field delegateRewards 
table The delegate rewards eligible, indexed by delegate address + + --- @class PrescribedEpochRewards + --- @field eligible table The eligible rewards + + --- @class DistributedEpochRewards: PrescribedEpochRewards + --- @field distributed table The distributed rewards + + --- @class PrescribedEpochDistribution + --- @field totalEligibleGateways number The total eligible gateways + --- @field totalEligibleRewards number The total eligible rewards + --- @field totalEligibleGatewayReward number The total eligible gateway reward + --- @field totalEligibleObserverReward number The total eligible observer reward + --- @field rewards PrescribedEpochRewards The rewards for the epoch, including eligible and distributed rewards + + --- @class DistributedEpochDistribution: PrescribedEpochDistribution + --- @field distributedTimestamp number The distributed timestamp + --- @field totalDistributedRewards number The total distributed rewards + --- @field rewards DistributedEpochRewards The rewards for the epoch, including eligible and distributed rewards + + --- Gets an epoch by index + --- @param epochIndex number The epoch index + --- @return PrescribedEpoch | nil # The prescribed epoch + function epochs.getEpoch(epochIndex) + if epochIndex < 0 then + return nil + end + local epoch = utils.deepCopy(Epochs[epochIndex]) or nil + return epoch + end + + -- Gets an epoch by index, unsafe + --- @param epochIndex number The epoch index + --- @return PrescribedEpoch | nil # The prescribed epoch + function epochs.getEpochUnsafe(epochIndex) + return Epochs[epochIndex] + end + + --- Gets the epoch settings + --- @return EpochSettings # The epoch settings + function epochs.getSettings() + return utils.deepCopy(EpochSettings) + end + + --- Gets the raw prescribed observers for an epoch + --- @param epochIndex number The epoch index + --- @return table # The prescribed observers for the epoch + function epochs.getPrescribedObserversForEpoch(epochIndex) + if epochIndex < 0 then + return {} 
+ end + local epoch = epochs.getEpoch(epochIndex) + return epoch and epoch.prescribedObservers or {} + end + + --- Get prescribed observers with weights for epoch + --- @param epochIndex number The epoch index + --- @return WeightedGateway[] # The prescribed observers with weights for the epoch + function epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) + local prescribedObservers = epochs.getPrescribedObserversForEpoch(epochIndex) + -- Iterate over prescribed observers and add gateway details + local prescribedObserversWithWeights = {} + for _, gatewayAddress in pairs(prescribedObservers) do + local gateway = gar.getGatewayUnsafe(gatewayAddress) + if gateway then + table.insert(prescribedObserversWithWeights, { + observerAddress = gateway.observerAddress, + gatewayAddress = gatewayAddress, + normalizedCompositeWeight = gateway.weights.normalizedCompositeWeight, + stakeWeight = gateway.weights.stakeWeight, + tenureWeight = gateway.weights.tenureWeight, + gatewayPerformanceRatio = gateway.weights.gatewayPerformanceRatio, + observerPerformanceRatio = gateway.weights.observerPerformanceRatio, + compositeWeight = gateway.weights.compositeWeight, + stake = gateway.operatorStake, + startTimestamp = gateway.startTimestamp, + }) + end + end + + -- sort by normalizedCompositeWeight + table.sort(prescribedObserversWithWeights, function(a, b) + return a.normalizedCompositeWeight > b.normalizedCompositeWeight + end) + return prescribedObserversWithWeights + end + + --- Gets the observations for an epoch + --- @param epochIndex number The epoch index + --- @return Observations # The observations for the epoch + function epochs.getObservationsForEpoch(epochIndex) + if epochIndex < 0 then + return {} + end + local epoch = epochs.getEpoch(epochIndex) + return epoch and epoch.observations or {} + end + + --- Gets the distributions for an epoch + --- @param epochIndex number The epoch index + --- @return PrescribedEpochDistribution # The distributions for the epoch + 
function epochs.getDistributionsForEpoch(epochIndex) + if epochIndex < 0 then + return {} + end + local epoch = epochs.getEpoch(epochIndex) + return epoch and epoch.distributions or {} + end + + --- Gets the prescribed names for an epoch + --- @param epochIndex number The epoch index + --- @return string[] # The prescribed names for the epoch + function epochs.getPrescribedNamesForEpoch(epochIndex) + if epochIndex < 0 then + return {} + end + local epoch = epochs.getEpoch(epochIndex) + return epoch and epoch.prescribedNames or {} + end + + --- Gets the reports for an epoch + --- @param epochIndex number The epoch index + --- @return table # The reports for the epoch + function epochs.getReportsForEpoch(epochIndex) + if epochIndex < 0 then + return {} + end + local epoch = epochs.getEpoch(epochIndex) + return epoch and epoch.observations.reports or {} + end + + --- Computes the prescribed names for an epoch + --- @param epochIndex number The epoch index + --- @param hashchain string The hashchain + --- @return string[] # The prescribed names for the epoch + function epochs.computePrescribedNamesForEpoch(epochIndex, hashchain) + if epochIndex < 0 then + return {} + end + local epochStartTimestamp, epochEndTimestamp = epochs.getEpochTimestampsForIndex(epochIndex) + local activeArNSNames = arns.getActiveArNSNamesBetweenTimestamps(epochStartTimestamp, epochEndTimestamp) + + -- sort active records by name and hashchain + table.sort(activeArNSNames, function(nameA, nameB) + local nameAHash = utils.getHashFromBase64URL(nameA) + local nameBHash = utils.getHashFromBase64URL(nameB) + local nameAString = crypto.utils.array.toString(nameAHash) + local nameBString = crypto.utils.array.toString(nameBHash) + return nameAString < nameBString + end) + + if #activeArNSNames <= epochs.getSettings().prescribedNameCount then + return activeArNSNames + end + + local epochHash = utils.getHashFromBase64URL(hashchain) + local prescribedNamesLookup = {} + local hash = epochHash + while 
utils.lengthOfTable(prescribedNamesLookup) < epochs.getSettings().prescribedNameCount do + local hashString = crypto.utils.array.toString(hash) + local random = crypto.random(nil, nil, hashString) % #activeArNSNames + + for i = 0, #activeArNSNames do + local index = (random + i) % #activeArNSNames + 1 + local alreadyPrescribed = prescribedNamesLookup[activeArNSNames[index]] ~= nil + if not alreadyPrescribed then + prescribedNamesLookup[activeArNSNames[index]] = true + break + end + end + + -- hash the hash to get a new hash + local newHash = crypto.utils.stream.fromArray(hash) + hash = crypto.digest.sha2_256(newHash).asBytes() + end + + local prescribedNames = utils.getTableKeys(prescribedNamesLookup) + + -- sort them by name + table.sort(prescribedNames, function(a, b) + return a < b + end) + return prescribedNames + end + + --- Computes the prescribed observers for an epoch + --- @param epochIndex number The epoch index + --- @param hashchain string The hashchain + --- @return table, WeightedGateway[] # The prescribed observers for the epoch, and all the gateways with weights + function epochs.computePrescribedObserversForEpoch(epochIndex, hashchain) + assert(epochIndex >= 0, "Epoch index must be greater than or equal to 0") + assert(type(hashchain) == "string", "Hashchain must be a string") + + local epochStartTimestamp = epochs.getEpochTimestampsForIndex(epochIndex) + local activeGatewayAddresses = gar.getActiveGatewayAddressesBeforeTimestamp(epochStartTimestamp) + local weightedGateways = gar.getGatewayWeightsAtTimestamp(activeGatewayAddresses, epochStartTimestamp) + + -- Filter out any observers that could have a normalized composite weight of 0 + local filteredObservers = {} + local prescribedObserversLookup = {} + -- use ipairs as weightedObservers in array + for _, observer in ipairs(weightedGateways) do + -- for the first epoch, we need to include all observers as there are no weights yet + if epochIndex == 0 or observer.normalizedCompositeWeight > 0 then 
+ table.insert(filteredObservers, observer) + end + end + if #filteredObservers <= epochs.getSettings().maxObservers then + for _, observer in ipairs(filteredObservers) do + prescribedObserversLookup[observer.observerAddress] = observer.gatewayAddress + end + return prescribedObserversLookup, weightedGateways + end + + -- the hash we will use to create entropy for prescribed observers + local epochHash = utils.getHashFromBase64URL(hashchain) + + -- sort the observers using entropy from the hash chain, this will ensure that the same observers are selected for the same epoch + table.sort(filteredObservers, function(observerA, observerB) + local addressAHash = utils.getHashFromBase64URL(observerA.gatewayAddress .. hashchain) + local addressBHash = utils.getHashFromBase64URL(observerB.gatewayAddress .. hashchain) + local addressAString = crypto.utils.array.toString(addressAHash) + local addressBString = crypto.utils.array.toString(addressBHash) + return addressAString < addressBString + end) + + -- get our prescribed observers, using the hashchain as entropy + local hash = epochHash + while utils.lengthOfTable(prescribedObserversLookup) < epochs.getSettings().maxObservers do + local hashString = crypto.utils.array.toString(hash) + local random = crypto.random(nil, nil, hashString) / 0xffffffff + local cumulativeNormalizedCompositeWeight = 0 + for _, observer in ipairs(filteredObservers) do + local alreadyPrescribed = prescribedObserversLookup[observer.observerAddress] + -- add only if observer has not already been prescribed + if not alreadyPrescribed then + -- add the observers normalized composite weight to the cumulative weight + cumulativeNormalizedCompositeWeight = cumulativeNormalizedCompositeWeight + + observer.normalizedCompositeWeight + -- if the random value is less than the cumulative weight, we have found our observer + if random <= cumulativeNormalizedCompositeWeight then + prescribedObserversLookup[observer.observerAddress] = observer.gatewayAddress + 
break + end + end + end + -- hash the hash to get a new hash + local newHash = crypto.utils.stream.fromArray(hash) + hash = crypto.digest.sha2_256(newHash).asBytes() + end + -- return the prescribed observers and the weighted observers + return prescribedObserversLookup, weightedGateways + end + + --- Gets the epoch timestamps for an epoch index. Epochs are 0-indexed. + --- @param epochIndex number The epoch index + --- @return number, number # The epoch start timestamp, epoch end timestamp + function epochs.getEpochTimestampsForIndex(epochIndex) + if epochIndex < 0 then + return 0, 0 + end + local epochStartTimestamp = epochs.getSettings().epochZeroStartTimestamp + + epochs.getSettings().durationMs * epochIndex + local epochEndTimestamp = epochStartTimestamp + epochs.getSettings().durationMs + return epochStartTimestamp, epochEndTimestamp + end + + --- Gets the epoch index for a given timestamp. Epochs are 0-indexed. + --- @param timestamp number The timestamp + --- @return number # The epoch index + function epochs.getEpochIndexForTimestamp(timestamp) + local timestampInMS = utils.checkAndConvertTimestampToMs(timestamp) + local epochZeroStartTimestamp = epochs.getSettings().epochZeroStartTimestamp + local epochLengthMs = epochs.getSettings().durationMs + local epochIndex = math.floor((timestampInMS - epochZeroStartTimestamp) / epochLengthMs) + return epochIndex + end + + --- Creates a new epoch and updates the gateway weights + --- + --- TODO: if we are asked to create an epoch to catch up, we should stub out the prescribed observers and names. Non-critical. 
+ --- @param currentTimestamp number The current timestamp in milliseconds + --- @param currentBlockHeight number The current block height + --- @param currentHashchain string The current hashchain + --- @return PrescribedEpoch | nil # The created epoch, or nil if an epoch already exists for the index + function epochs.createAndPrescribeNewEpoch(currentTimestamp, currentBlockHeight, currentHashchain) + assert(type(currentTimestamp) == "number", "Timestamp must be a number") + assert(type(currentBlockHeight) == "number", "Block height must be a number") + assert(type(currentHashchain) == "string", "Hashchain must be a string") + + -- if before the epoch zero start timestamp, return nil + if currentTimestamp < epochs.getSettings().epochZeroStartTimestamp then + print("Genesis epoch will start at: " .. epochs.getSettings().epochZeroStartTimestamp) + return nil + end + + local currentEpochIndex = epochs.getEpochIndexForTimestamp(currentTimestamp) + if epochs.getEpoch(currentEpochIndex) then + print("Epoch already exists for index, skipping creation: " .. currentEpochIndex) + return nil -- do not return the existing epoch to prevent sending redundant epoch-created-notices + end + + print("Creating new epoch: " .. 
currentEpochIndex) + + -- get the max rewards for each participant eligible for the epoch + local prescribedObservers, updatedGatewaysWithWeights = + epochs.computePrescribedObserversForEpoch(currentEpochIndex, currentHashchain) + local prescribedNames = epochs.computePrescribedNamesForEpoch(currentEpochIndex, currentHashchain) + local eligibleEpochRewards = epochs.computeTotalEligibleRewardsForEpoch(currentEpochIndex, prescribedObservers) + local epochStartTimestamp, epochEndTimestamp = epochs.getEpochTimestampsForIndex(currentEpochIndex) + local arnsStatsAtEpochStart = arns.getArNSStatsAtTimestamp(epochStartTimestamp) + + -- always update the gateway weights to the latest computed weights when we create a new epoch + for _, weightedGateway in ipairs(updatedGatewaysWithWeights) do + gar.updateGatewayWeights(weightedGateway) + end + + --- @type PrescribedEpoch + local epoch = { + hashchain = currentHashchain, + epochIndex = currentEpochIndex, + startTimestamp = epochStartTimestamp, + endTimestamp = epochEndTimestamp, + startHeight = currentBlockHeight, + arnsStats = arnsStatsAtEpochStart, + prescribedObservers = prescribedObservers, + prescribedNames = prescribedNames, + observations = { + failureSummaries = {}, + reports = {}, + }, + distributions = { + totalEligibleRewards = eligibleEpochRewards.totalEligibleRewards, + totalEligibleGatewayReward = eligibleEpochRewards.perGatewayReward, + totalEligibleObserverReward = eligibleEpochRewards.perObserverReward, + totalEligibleGateways = eligibleEpochRewards.totalEligibleGateways, + rewards = { + eligible = eligibleEpochRewards.potentialRewards, + }, + }, + } + Epochs[currentEpochIndex] = epoch + + return epoch + end + + --- Saves the observations for an epoch + --- @param observerAddress string The observer address + --- @param reportTxId string The report transaction ID + --- @param failedGatewayAddresses table The failed gateway addresses + --- @param epochIndex number The epoch index + --- @param currentTimestamp 
number The current timestamp + --- @return Observations # The updated observations for the epoch + function epochs.saveObservations(observerAddress, reportTxId, failedGatewayAddresses, epochIndex, currentTimestamp) + -- Note: one of the only places we use arweave addresses, as the protocol requires the report to be stored on arweave. This would be a significant change to OIP if changed. + assert(utils.isValidArweaveAddress(reportTxId), "Report transaction ID is not a valid address") + assert(utils.isValidAddress(observerAddress, true), "Observer address is not a valid address") -- allow unsafe addresses for observer address + assert(type(failedGatewayAddresses) == "table", "Failed gateway addresses is required") + for _, address in ipairs(failedGatewayAddresses) do + assert(utils.isValidAddress(address, true), "Failed gateway address is not a valid address") -- allow unsafe addresses for failed gateway addresses + end + assert(epochIndex >= 0, "Epoch index must be greater than 0") + assert(type(currentTimestamp) == "number", "Timestamp is required") + + local epochStartTimestamp, epochEndTimestamp = epochs.getEpochTimestampsForIndex(epochIndex) + + -- avoid observations before the previous epoch distribution has occurred, as distributions affect weights of the current epoch + assert( + currentTimestamp > epochStartTimestamp, + "Observations for epoch " .. epochIndex .. " must be submitted after " .. epochStartTimestamp + ) + assert( + currentTimestamp < epochEndTimestamp, + "Observations for epoch " .. epochIndex .. " must be submitted before " .. 
epochEndTimestamp + ) + + local prescribedObserversLookup = epochs.getPrescribedObserversForEpoch(epochIndex) + assert(utils.lengthOfTable(prescribedObserversLookup) > 0, "No prescribed observers for the current epoch.") + + local gatewayAddressForObserver = prescribedObserversLookup[observerAddress] + assert(gatewayAddressForObserver, "Caller is not a prescribed observer for the current epoch.") + + local observingGateway = gar.getGateway(gatewayAddressForObserver) + assert(observingGateway, "The associated gateway not found in the registry.") + + -- we'll be updating the epoch, so get a direct reference to it + local epoch = epochs.getEpochUnsafe(epochIndex) + assert(epoch, "Unable to save observation. Epoch not found for index: " .. epochIndex) + + -- check if this is the first report filed in this epoch + if epoch.observations == nil then + epoch.observations = { + failureSummaries = {}, + reports = {}, + } + end + + -- use ipairs as failedGatewayAddresses is an array + for _, failedGatewayAddress in ipairs(failedGatewayAddresses) do + -- we're not updating the gateway, so we can use getGatewayUnsafe without fear of overwriting the gateway + local gateway = gar.getGatewayUnsafe(failedGatewayAddress) + + if gateway then + local gatewayPresentDuringEpoch = gar.isGatewayActiveBeforeTimestamp(epochStartTimestamp, gateway) + if gatewayPresentDuringEpoch then + -- if there are none, create an array + if epoch.observations.failureSummaries == nil then + epoch.observations.failureSummaries = {} + end + -- Get the existing set of failed gateways for this observer + local observersMarkedFailed = epoch.observations.failureSummaries[failedGatewayAddress] or {} + + -- if list of observers who marked failed does not continue current observer than add it + local alreadyObservedIndex = utils.findInArray(observersMarkedFailed, function(address) + return address == observingGateway.observerAddress + end) + + if not alreadyObservedIndex then + table.insert(observersMarkedFailed, 
observingGateway.observerAddress) + end + + epoch.observations.failureSummaries[failedGatewayAddress] = observersMarkedFailed + end + end + end + + -- if reports are not already present, create an array + if epoch.observations.reports == nil then + epoch.observations.reports = {} + end + + -- update the epoch + epoch.observations.reports[observingGateway.observerAddress] = reportTxId + return epoch.observations + end + + --- @class DistributionSettings + --- @field gatewayOperatorRewardRate number The gateway operator reward ratio + --- @field observerRewardRate number The observer reward ratio + --- @field rewardDecayStartEpoch number The reward decay start epoch + --- @field rewardDecayLastEpoch number The reward decay last epoch + --- @field maximumRewardRate number The maximum reward rate + --- @field minimumRewardRate number The minimum reward rate + function epochs.getDistributionSettings() + return utils.deepCopy(DistributionSettings) + end + + --- @class ComputedRewards + --- @field totalEligibleGateways number The total eligible gateways + --- @field totalEligibleRewards number The total eligible rewards + --- @field perGatewayReward number The per gateway reward + --- @field perObserverReward number The per observer reward + --- @field potentialRewards table The potential rewards for each gateway + + --- Computes the total eligible rewards for an epoch based on the protocol balance and the reward percentage and prescribed observers + --- @param epochIndex number The epoch index + --- @param prescribedObserversLookup table The prescribed observers for the epoch + --- @return ComputedRewards # The total eligible rewards + function epochs.computeTotalEligibleRewardsForEpoch(epochIndex, prescribedObserversLookup) + if epochIndex < 0 then + return { + totalEligibleGateways = 0, + totalEligibleRewards = 0, + perGatewayReward = 0, + perObserverReward = 0, + potentialRewards = {}, + } + end + local distributionSettings = epochs.getDistributionSettings() + local 
epochStartTimestamp = epochs.getEpochTimestampsForIndex(epochIndex) + local activeGatewayAddresses = gar.getActiveGatewayAddressesBeforeTimestamp(epochStartTimestamp) + local protocolBalance = balances.getBalance(ao.id) + local rewardRate = epochs.getRewardRateForEpoch(epochIndex) + local totalEligibleRewards = math.floor(protocolBalance * rewardRate) + local eligibleGatewayReward = #activeGatewayAddresses > 0 + and math.floor( + totalEligibleRewards * distributionSettings.gatewayOperatorRewardRate / #activeGatewayAddresses + ) + or 0 + local eligibleObserverReward = utils.lengthOfTable(prescribedObserversLookup) > 0 + and math.floor( + totalEligibleRewards + * distributionSettings.observerRewardRate + / utils.lengthOfTable(prescribedObserversLookup) + ) + or 0 + -- compute for each gateway what their potential rewards are and for their delegates + local potentialRewards = {} + -- use ipairs as activeGatewayAddresses is an array + for _, gatewayAddress in ipairs(activeGatewayAddresses) do + local gateway = gar.getGateway(gatewayAddress) + if gateway ~= nil then + local potentialReward = eligibleGatewayReward -- start with the gateway reward + -- it it is a prescribed observer for the epoch, it is eligible for the observer reward + if prescribedObserversLookup[gateway.observerAddress] then + potentialReward = potentialReward + eligibleObserverReward -- add observer reward if it is a prescribed observer + end + -- if any delegates are present, distribute the rewards to the delegates + local eligibleDelegateRewards = gateway.totalDelegatedStake > 0 + and math.floor(potentialReward * (gateway.settings.delegateRewardShareRatio / 100)) + or 0 + -- set the potential reward for the gateway + local eligibleOperatorRewards = potentialReward - eligibleDelegateRewards + local eligibleRewardsForGateway = { + operatorReward = eligibleOperatorRewards, + delegateRewards = {}, + } + -- use pairs as gateway.delegates is map + for delegateAddress, delegate in pairs(gateway.delegates) 
do + if gateway.totalDelegatedStake > 0 then + local delegateReward = math.floor( + (delegate.delegatedStake / gateway.totalDelegatedStake) * eligibleDelegateRewards + ) + if delegateReward > 0 then + eligibleRewardsForGateway.delegateRewards[delegateAddress] = delegateReward + end + end + end + -- set the potential rewards for the gateway + potentialRewards[gatewayAddress] = eligibleRewardsForGateway + end + end + return { + totalEligibleGateways = #activeGatewayAddresses, + totalEligibleRewards = totalEligibleRewards, + perGatewayReward = eligibleGatewayReward, + perObserverReward = eligibleObserverReward, + potentialRewards = potentialRewards, + } + end + --- Distributes the rewards for a prescribed epoch + --- 1. Calculate the rewards for the epoch based on protocol balance + --- 2. Allocate 95% of the rewards for passed gateways, 5% for observers - based on total gateways during the epoch and # of prescribed observers + --- 3. Distribute the rewards to the gateways and observers + --- 4. Increment the epoch stats for the gateways + --- @param epochIndexToDistribute number The epoch to distribute + --- @param currentTimestamp number The current timestamp + --- @return DistributedEpoch | nil # The updated epoch with the distributed rewards, or nil if no rewards were distributed + function epochs.distributeEpoch(epochIndexToDistribute, currentTimestamp) + if epochIndexToDistribute < 0 then + -- silently ignore - Distribution can only occur after the epoch has ended + return nil + end + + -- get the epoch reference to avoid extra copying, it will be set to nil after the distribution is complete + local epochToDistribute = epochs.getEpoch(epochIndexToDistribute) + + if not epochToDistribute then + -- TODO: consider throwing an error here instead of silently returning, as this is a critical error and should be fixed + print("Epoch " .. epochIndexToDistribute .. " not found in state. 
Skipping distribution.") + return nil + end + + --- The epoch was already distributed and should be cleaned up + --- @cast epochToDistribute DistributedEpoch + if epochToDistribute.distributions.distributedTimestamp then + print( + "Rewards already distributed for epoch. Epoch will be removed from the epoch registry: " + .. epochIndexToDistribute + ) + Epochs[epochIndexToDistribute] = nil + return nil -- do not return the epoch as it has already been distributed, and we do not want to send redundant epoch-distributed-notices + end + + -- ensure we are not distributing the current epoch, that can only happen + if currentTimestamp < epochToDistribute.endTimestamp then + print("Epoch will be distributed after the current timestamp: " .. epochToDistribute.endTimestamp) + return nil + end + + print("Distributing epoch: " .. epochIndexToDistribute) + + local eligibleGatewaysForEpoch = epochToDistribute.distributions.rewards.eligible or {} + local prescribedObserversLookup = epochToDistribute.prescribedObservers or {} + local totalEligibleObserverReward = epochToDistribute.distributions.totalEligibleObserverReward or 0 + local totalEligibleGatewayReward = epochToDistribute.distributions.totalEligibleGatewayReward or 0 + local totalObservationsSubmitted = utils.lengthOfTable(epochToDistribute.observations.reports) or 0 + local prescribedObserversWithWeights = + epochs.getPrescribedObserversWithWeightsForEpoch(epochToDistribute.epochIndex) + local epochToDistributeReports = epochToDistribute.observations and epochToDistribute.observations.reports or {} + local epochToDistributeFailureSummaries = epochToDistribute.observations + and epochToDistribute.observations.failureSummaries + or {} + local missedObservationPenaltyRate = epochs.getDistributionSettings().missedObservationPenaltyRate + local distributed = {} + for gatewayAddress, totalEligibleRewardsForGateway in pairs(eligibleGatewaysForEpoch) do + local gateway = gar.getGateway(gatewayAddress) + -- only distribute 
rewards if the gateway is found and not leaving + if gateway and totalEligibleRewardsForGateway and gateway.status ~= "leaving" then + -- check the observations to see if gateway passed, if 50% or more of the observers marked the gateway as failed, it is considered failed + local observersMarkedFailed = epochToDistributeFailureSummaries[gatewayAddress] or {} + local failed = #observersMarkedFailed > (totalObservationsSubmitted / 2) -- more than 50% of observations submitted marked gateway as failed + + -- if prescribed, we'll update the prescribed stats as well - find if the observer address is in prescribed observers + local isPrescribed = prescribedObserversLookup[gateway.observerAddress] + + local observationSubmitted = isPrescribed and epochToDistributeReports[gateway.observerAddress] ~= nil + + local updatedStats = { + totalEpochCount = gateway.stats.totalEpochCount + 1, + failedEpochCount = failed and gateway.stats.failedEpochCount + 1 or gateway.stats.failedEpochCount, + failedConsecutiveEpochs = failed and gateway.stats.failedConsecutiveEpochs + 1 or 0, + passedConsecutiveEpochs = failed and 0 or gateway.stats.passedConsecutiveEpochs + 1, + passedEpochCount = failed and gateway.stats.passedEpochCount or gateway.stats.passedEpochCount + 1, + prescribedEpochCount = isPrescribed and gateway.stats.prescribedEpochCount + 1 + or gateway.stats.prescribedEpochCount, + observedEpochCount = observationSubmitted and gateway.stats.observedEpochCount + 1 + or gateway.stats.observedEpochCount, + } + + -- update the gateway stats, returns the updated gateway + gateway = gar.updateGatewayStats(gatewayAddress, gateway, updatedStats) + + -- Scenarios + -- 1. Gateway passed and was prescribed and submitted an observation - it gets full gateway reward + -- 2. Gateway passed and was prescribed and did not submit an observation - it gets only the gateway reward, docked by 25% + -- 2. Gateway passed and was not prescribed -- it gets full operator reward + -- 3. 
Gateway failed and was prescribed and did not submit observation -- it gets no reward + -- 3. Gateway failed and was prescribed and did submit observation -- it gets the observer reward + -- 4. Gateway failed and was not prescribed -- it gets no reward + local earnedRewardForGatewayAndDelegates = 0 + if not failed then + if isPrescribed then + if observationSubmitted then + -- 1. gateway passed and was prescribed and submitted an observation - it gets full reward + earnedRewardForGatewayAndDelegates = + math.floor(totalEligibleGatewayReward + totalEligibleObserverReward) + else + -- 2. gateway passed and was prescribed and did not submit an observation - it gets only the gateway reward, docked by 25% + earnedRewardForGatewayAndDelegates = + math.floor(totalEligibleGatewayReward * (1 - missedObservationPenaltyRate)) + end + else + -- 3. gateway passed and was not prescribed -- it gets full gateway reward + earnedRewardForGatewayAndDelegates = math.floor(totalEligibleGatewayReward) + end + else + if isPrescribed then + if observationSubmitted then + -- 3. 
gateway failed and was prescribed and did submit an observation -- it gets the observer reward + earnedRewardForGatewayAndDelegates = math.floor(totalEligibleObserverReward) + end + end + end + + local totalEligibleRewardsForGatewayAndDelegates = totalEligibleRewardsForGateway.operatorReward + + utils.sumTableValues(totalEligibleRewardsForGateway.delegateRewards) + + if earnedRewardForGatewayAndDelegates > 0 and totalEligibleRewardsForGatewayAndDelegates > 0 then + local percentOfEligibleEarned = earnedRewardForGatewayAndDelegates + / totalEligibleRewardsForGatewayAndDelegates -- percent of what was earned vs what was eligible + -- optimally this is 1, but if the gateway did not do what it was supposed to do, it will be less than 1 and thus all payouts will be less + local totalDistributedToDelegates = 0 + local totalRewardsForMissingDelegates = 0 + -- distribute all the predetermined rewards to the delegates + for delegateAddress, eligibleDelegateReward in pairs(totalEligibleRewardsForGateway.delegateRewards) do + local actualDelegateReward = math.floor(eligibleDelegateReward * percentOfEligibleEarned) + -- distribute the rewards to the delegate if greater than 0 and the delegate still exists on the gateway and has a stake greater than 0 + if actualDelegateReward > 0 then + if gar.isDelegateEligibleForDistributions(gateway, delegateAddress) then + -- increase the stake and decrease the protocol balance, returns the updated gateway + gateway = gar.increaseExistingDelegateStake( + gatewayAddress, + gateway, + delegateAddress, + actualDelegateReward + ) + balances.reduceBalance(ao.id, actualDelegateReward) + -- update the distributed rewards for the delegate + distributed[delegateAddress] = (distributed[delegateAddress] or 0) + + actualDelegateReward + totalDistributedToDelegates = totalDistributedToDelegates + actualDelegateReward + else + totalRewardsForMissingDelegates = totalRewardsForMissingDelegates + actualDelegateReward + end + end + end + -- transfer the 
	--- Creates a distributed epoch from a prescribed epoch
	--- @param epoch PrescribedEpoch # The prescribed epoch
	--- @param currentTimestamp number # The timestamp recorded as the distribution time
	--- @param distributed table # The actually-distributed rewards, keyed by recipient address
	--- @param prescribedObservers table # The prescribed observers (with weights) carried onto the distributed epoch
	--- @return DistributedEpoch # The distributed epoch
	-- NOTE(review): declared as a global function (no `local`) — it is referenced earlier in this
	-- module before this definition, which only works because it is global; confirm this is intentional.
	function convertPrescribedEpochToDistributedEpoch(epoch, currentTimestamp, distributed, prescribedObservers)
		return {
			hashchain = epoch.hashchain,
			epochIndex = epoch.epochIndex,
			startTimestamp = epoch.startTimestamp,
			endTimestamp = epoch.endTimestamp,
			startHeight = epoch.startHeight,
			prescribedObservers = prescribedObservers,
			prescribedNames = epoch.prescribedNames,
			observations = epoch.observations,
			distributions = {
				distributedTimestamp = currentTimestamp,
				-- total actually paid out, not the eligible total
				totalDistributedRewards = utils.sumTableValues(distributed),
				totalEligibleGateways = epoch.distributions.totalEligibleGateways,
				totalEligibleRewards = epoch.distributions.totalEligibleRewards,
				totalEligibleGatewayReward = epoch.distributions.totalEligibleGatewayReward,
				totalEligibleObserverReward = epoch.distributions.totalEligibleObserverReward,
				rewards = {
					eligible = epoch.distributions.rewards.eligible or {},
					distributed = distributed or {},
				},
			},
			arnsStats = epoch.arnsStats,
		}
	end
	--- Gets the reward rate for an epoch. The reward rate is the percentage of the protocol balance that is distributed to the gateways and observers.
	--- For the first year, the reward rate is 0.1% of the protocol balance.
	--- After the first year, the reward rate decays linearly to 0.05% of the protocol balance after 1.5 years.
	--- @param epochIndex number # The epoch index to compute the reward rate for
	--- @return number # The reward rate as a fraction, rounded to 5 decimal places
	function epochs.getRewardRateForEpoch(epochIndex)
		local distributionSettings = epochs.getDistributionSettings()

		-- before the decay window starts: the maximum reward rate applies
		if epochIndex < distributionSettings.rewardDecayStartEpoch then
			return distributionSettings.maximumRewardRate
		end

		-- past the end of the decay window: the minimum reward rate applies
		if epochIndex > distributionSettings.rewardDecayLastEpoch then
			return distributionSettings.minimumRewardRate
		end
		-- inside the decay window: linearly interpolate from maximum down to minimum
		local totalDecayPeriod = (
			distributionSettings.rewardDecayLastEpoch - distributionSettings.rewardDecayStartEpoch
		)
		local epochsAlreadyDecayed = (epochIndex - distributionSettings.rewardDecayStartEpoch)
		local decayRatePerEpoch = (distributionSettings.maximumRewardRate - distributionSettings.minimumRewardRate)
			/ totalDecayPeriod
		local totalRateDecayed = decayRatePerEpoch * epochsAlreadyDecayed
		local totalRewardRateDecayed = distributionSettings.maximumRewardRate - totalRateDecayed
		-- avoid floating point precision issues, round to 5 decimal places
		return utils.roundToPrecision(totalRewardRateDecayed, 5)
	end
totalRewardRateDecayed = distributionSettings.maximumRewardRate - totalRateDecayed + -- avoid floating point precision issues, round to 5 decimal places + return utils.roundToPrecision(totalRewardRateDecayed, 5) + end + + --- @class EligibleRewards + --- @field recipient WalletAddress + --- @field eligibleReward mARIO + --- @field gatewayAddress WalletAddress + --- @field type "delegateReward"|"operatorReward" + --- @field cursorId string gatewayAddress concatenated with recipient for pagination + + --- Gets the distributions for the current epoch + --- @param currentTimestamp number + --- @param cursor string|nil The cursor to paginate from + --- @param limit number The limit of records to return + --- @param sortBy string|nil The field to sort by + --- @param sortOrder string The order to sort by + --- @return PaginatedTable The paginated eligible distributions for the epoch + function epochs.getEligibleRewardsForEpoch(currentTimestamp, cursor, limit, sortBy, sortOrder) + local epochIndex = epochs.getEpochIndexForTimestamp(currentTimestamp) + if epochIndex < 0 then + return {} + end + local epoch = epochs.getEpochUnsafe(epochIndex) + if + not epoch + or not epoch.distributions + or not epoch.distributions.rewards + or not epoch.distributions.rewards.eligible + then + return {} + end + + local rewardsArray = {} + for gatewayAddress, reward in pairs(epoch.distributions.rewards.eligible) do + table.insert(rewardsArray, { + type = "operatorReward", + recipient = gatewayAddress, + eligibleReward = reward.operatorReward, + gatewayAddress = gatewayAddress, + cursorId = gatewayAddress .. "_" .. gatewayAddress, + }) + + for delegateAddress, delegateRewardQty in pairs(reward.delegateRewards) do + table.insert(rewardsArray, { + type = "delegateReward", + recipient = delegateAddress, + eligibleReward = delegateRewardQty, + gatewayAddress = gatewayAddress, + cursorId = gatewayAddress .. "_" .. 
delegateAddress, + }) + end + end + + return utils.paginateTableWithCursor(rewardsArray, cursor, "cursorId", limit, sortBy, sortOrder) + end + + return epochs +end + +_G.package.loaded[".src.epochs"] = _loaded_mod_src_epochs() + +-- module: ".src.primary_names" +local function _loaded_mod_src_primary_names() + local arns = require(".src.arns") + local balances = require(".src.balances") + local utils = require(".src.utils") + local gar = require(".src.gar") + local constants = require(".src.constants") + local demand = require(".src.demand") + local primaryNames = {} + + --- @alias WalletAddress string + --- @alias ArNSName string + + --- @class PrimaryNames + --- @field owners table - map indexed by owner address containing the primary name and all metadata, used for reverse lookups + --- @field names table - map indexed by primary name containing the owner address, used for reverse lookups + --- @field requests table - map indexed by owner address containing the request, used for pruning expired requests + + --- @class PrimaryName + --- @field name ArNSName + --- @field startTimestamp number + + --- @class PrimaryNameWithOwner + --- @field name ArNSName + --- @field owner WalletAddress + --- @field startTimestamp number + + --- @class PrimaryNameInfo + --- @field name ArNSName + --- @field owner WalletAddress + --- @field startTimestamp number + --- @field processId WalletAddress + + --- @class PrimaryNameRequest + --- @field name ArNSName -- the name being requested + --- @field startTimestamp number -- the timestamp of the request + --- @field endTimestamp number -- the timestamp of the request expiration + + --- @class CreatePrimaryNameResult + --- @field request PrimaryNameRequest|nil + --- @field newPrimaryName PrimaryNameWithOwner|nil + --- @field baseNameOwner WalletAddress + --- @field fundingPlan table + --- @field fundingResult table + --- @field demandFactor table + --- + -- NOTE: lua 5.3 has limited regex support, particularly for lookaheads and 
negative lookaheads or use of {n} + ---@param name string + ---@description Asserts that the provided name is a valid undername + ---@example + ---```lua + ---utils.assertValidateUndername("my-undername") + ---``` + function primaryNames.assertValidUndername(name) + --- RULES FOR UNDERNAMES + --- min 1 char + --- max 61 chars + --- no starting dashes or underscores + --- alphanumeric, dashes, underscores OR one '@' sign + + local validLength = #name <= constants.MAX_UNDERNAME_LENGTH + assert(validLength, "Undername is too long, recieved length of " .. tostring(#name)) + local validRegex = string.match(name, constants.ARNS_NAME_SINGLE_CHAR_REGEX) ~= nil + or string.match(name, constants.UNDERNAME_REGEX) ~= nil + local valid = validLength and validRegex + assert(valid, "Invalid undername " .. name) + end + + --- Asserts that a name is a valid Primary name + --- Validates the undername and base name + --- @param name string The name to check + function primaryNames.assertValidPrimaryName(name) + assert(name and type(name) == "string", "Name is required and must be a string.") + + assert( + #name <= constants.MAX_PRIMARY_NAME_LENGTH, + "Primary Name with length " + .. #name + .. " exceeds maximum allowable length of " + .. constants.MAX_PRIMARY_NAME_LENGTH + ) + + local baseName = utils.baseNameForName(name) + arns.assertValidArNSName(baseName) + local undername = utils.undernameForName(name) + if undername then + primaryNames.assertValidUndername(undername) + end + end + + --- Creates a transient request for a primary name. This is done by a user and must be approved by the name owner of the base name. + --- @param name string -- the name being requested, this could be an undername and should always be lower case + --- @param initiator WalletAddress -- the address that is creating the primary name request, e.g. 
	--- Creates a transient request for a primary name. This is done by a user and must be approved by the name owner of the base name.
	--- If the initiator already owns the base name's record, the primary name is set immediately instead of creating a pending request.
	--- @param name string -- the name being requested, this could be an undername and should always be lower case
	--- @param initiator WalletAddress -- the address that is creating the primary name request, e.g. the ANT process id
	--- @param timestamp number -- the timestamp of the request
	--- @param msgId string -- the message id of the request
	--- @param fundFrom "balance"|"stakes"|"any"|nil -- the source to fund the request from. Default is "balance"
	--- @return CreatePrimaryNameResult # the request created, or the primary name with owner data if the request is approved
	function primaryNames.createPrimaryNameRequest(name, initiator, timestamp, msgId, fundFrom)
		fundFrom = fundFrom or "balance"

		primaryNames.assertValidPrimaryName(name)

		-- names are stored lower-case; normalize before any lookups
		name = string.lower(name)
		local baseName = utils.baseNameForName(name)

		--- check the primary name request for the initiator does not already exist for the same name
		--- this allows the caller to create a new request and pay the fee again, so long as it is for a different name
		local existingRequest = primaryNames.getPrimaryNameRequest(initiator)
		assert(
			not existingRequest or existingRequest.name ~= name,
			"Primary name request by '" .. initiator .. "' for '" .. name .. "' already exists"
		)

		--- check the primary name is not already owned
		local primaryNameOwner = primaryNames.getAddressForPrimaryName(name)
		assert(not primaryNameOwner, "Primary name is already owned")

		-- the base name must exist and be active (not expired) at the request time
		local record = arns.getRecord(baseName)
		assert(record, "ArNS record '" .. baseName .. "' does not exist")
		assert(arns.recordIsActive(record, timestamp), "ArNS record '" .. baseName .. "' is not active")

		local requestCost = arns.getTokenCost({
			intent = "Primary-Name-Request",
			name = name,
			currentTimestamp = timestamp,
			record = record,
		})

		-- build and apply a funding plan; applyFundingPlan debits the initiator's balance/stakes
		local fundingPlan = gar.getFundingPlan(initiator, requestCost.tokenCost, fundFrom)
		assert(fundingPlan and fundingPlan.shortfall == 0, "Insufficient balances")
		local fundingResult = gar.applyFundingPlan(fundingPlan, msgId, timestamp)
		assert(fundingResult.totalFunded == requestCost.tokenCost, "Funding plan application failed")

		--- credit the protocol balance with the primary name cost (the debit happened in applyFundingPlan above)
		balances.increaseBalance(ao.id, requestCost.tokenCost)
		demand.tallyNamePurchase(requestCost.tokenCost)

		local request = {
			name = name,
			startTimestamp = timestamp,
			endTimestamp = timestamp + constants.PRIMARY_NAME_REQUEST_DURATION_MS,
		}

		--- if the initiator is base name owner, then just set the primary name and return
		local newPrimaryName
		if record.processId == initiator then
			newPrimaryName = primaryNames.setPrimaryNameFromRequest(initiator, request, timestamp)
		else
			-- otherwise store the request for asynchronous approval and schedule its expiration pruning
			PrimaryNames.requests[initiator] = request
			primaryNames.scheduleNextPrimaryNamesPruning(request.endTimestamp)
		end

		return {
			request = request,
			newPrimaryName = newPrimaryName,
			baseNameOwner = record.processId,
			fundingPlan = fundingPlan,
			fundingResult = fundingResult,
			demandFactor = demand.getDemandFactorInfo(),
		}
	end

	--- Get a primary name request, safely deep copying the request so callers cannot mutate state
	--- @param address WalletAddress
	--- @return PrimaryNameRequest|nil primaryNameClaim - the request found, or nil if it does not exist
	function primaryNames.getPrimaryNameRequest(address)
		return utils.deepCopy(primaryNames.getUnsafePrimaryNameRequests()[address])
	end
	--- Unsafe (not deep-copied) access to the primary name requests map
	--- @return table primaryNameClaims - the primary name requests, keyed by initiator address
	function primaryNames.getUnsafePrimaryNameRequests()
		return PrimaryNames.requests or {}
	end

	--- Unsafe (not deep-copied) access to the primary names map (name -> owner address)
	--- @return table primaryNames - the primary names
	function primaryNames.getUnsafePrimaryNames()
		return PrimaryNames.names or {}
	end

	--- Unsafe (not deep-copied) access to the primary name owners
	--- @return table primaryNames - the primary names, keyed by owner address
	function primaryNames.getUnsafePrimaryNameOwners()
		return PrimaryNames.owners or {}
	end

	--- @class PrimaryNameRequestApproval
	--- @field newPrimaryName PrimaryNameWithOwner
	--- @field request PrimaryNameRequest

	--- Action taken by the owner of a primary name. This is who pays for the primary name.
	--- @param recipient string -- the address that requested the primary name
	--- @param name string -- the name being approved; must match the pending request's name
	--- @param from string -- the process id approving the request; must own the base name's record
	--- @param timestamp number -- the timestamp of the approval
	--- @return PrimaryNameRequestApproval # the primary name with owner data and original request
	function primaryNames.approvePrimaryNameRequest(recipient, name, from, timestamp)
		local request = primaryNames.getPrimaryNameRequest(recipient)
		assert(request, "Primary name request not found")
		assert(request.endTimestamp > timestamp, "Primary name request has expired")
		assert(name == request.name, "Provided name does not match the primary name request")

		-- assert the process id in the initial request still owns the name
		local baseName = utils.baseNameForName(request.name)
		local record = arns.getRecord(baseName)
		assert(record, "ArNS record '" .. baseName .. "' does not exist")
		assert(record.processId == from, "Primary name request must be approved by the owner of the base name")

		-- set the primary name (also removes the pending request)
		local newPrimaryName = primaryNames.setPrimaryNameFromRequest(recipient, request, timestamp)
		return {
			newPrimaryName = newPrimaryName,
			request = request,
		}
	end
+ --- @param recipient string -- the address that is requesting the primary name + --- @param request PrimaryNameRequest + --- @param startTimestamp number + --- @return PrimaryNameWithOwner # the primary name with owner data + function primaryNames.setPrimaryNameFromRequest(recipient, request, startTimestamp) + --- if the owner has an existing primary name, make sure we remove it from the maps before setting the new one + local existingPrimaryName = primaryNames.getPrimaryNameDataWithOwnerFromAddress(recipient) + if existingPrimaryName then + primaryNames.removePrimaryName(existingPrimaryName.name, recipient) + end + PrimaryNames.names[request.name] = recipient + PrimaryNames.owners[recipient] = { + name = request.name, + startTimestamp = startTimestamp, + } + PrimaryNames.requests[recipient] = nil + return { + name = request.name, + owner = recipient, + startTimestamp = startTimestamp, + } + end + + --- @class RemovedPrimaryNameResult + --- @field name string + --- @field owner WalletAddress + + --- Remove primary names, returning the results of the name removals + --- @param names string[] + --- @param from string + --- @return RemovedPrimaryNameResult[] removedPrimaryNameResults - the results of the name removals + function primaryNames.removePrimaryNames(names, from) + local removedPrimaryNamesAndOwners = {} + for _, name in pairs(names) do + local removedPrimaryNameAndOwner = primaryNames.removePrimaryName(name, from) + table.insert(removedPrimaryNamesAndOwners, removedPrimaryNameAndOwner) + end + return removedPrimaryNamesAndOwners + end + + --- Release a primary name + --- @param name ArNSName -- the name being released + --- @param from WalletAddress -- the address that is releasing the primary name, or the owner of the base name + --- @return RemovedPrimaryNameResult + function primaryNames.removePrimaryName(name, from) + --- assert the from is the current owner of the name + local primaryName = primaryNames.getPrimaryNameDataWithOwnerFromName(name) + 
	--- Release a primary name
	--- @param name ArNSName -- the name being released
	--- @param from WalletAddress -- the address that is releasing the primary name, or the owner of the base name
	--- @return RemovedPrimaryNameResult
	function primaryNames.removePrimaryName(name, from)
		--- assert the from is the current owner of the name, or owns the base name's record
		local primaryName = primaryNames.getPrimaryNameDataWithOwnerFromName(name)
		assert(primaryName, "Primary name '" .. name .. "' does not exist")
		local baseName = utils.baseNameForName(name)
		local record = arns.getRecord(baseName)
		assert(
			primaryName.owner == from or (record and record.processId == from),
			"Caller is not the owner of the primary name, or the owner of the " .. baseName .. " record"
		)

		-- drop both the forward (name -> owner) and reverse (owner -> name) entries
		PrimaryNames.names[name] = nil
		PrimaryNames.owners[primaryName.owner] = nil
		-- also drop any pending request by this owner for the same name
		if PrimaryNames.requests[primaryName.owner] and PrimaryNames.requests[primaryName.owner].name == name then
			PrimaryNames.requests[primaryName.owner] = nil
		end
		return {
			name = name,
			owner = primaryName.owner,
		}
	end

	--- Get the address for a primary name, allowing for forward lookups (e.g. "foo.bar" -> "0x123")
	--- @param name string
	--- @return WalletAddress|nil address -- the address for the primary name, or nil if it does not exist
	function primaryNames.getAddressForPrimaryName(name)
		return PrimaryNames.names[name]
	end
"0x123" -> "foo.bar") + --- @param address string + --- @return PrimaryNameInfo|nil -- the primary name with owner data, or nil if it does not exist + function primaryNames.getPrimaryNameDataWithOwnerFromAddress(address) + local nameData = PrimaryNames.owners[address] + + if not nameData then + return nil + end + return { + + owner = address, + name = nameData.name, + startTimestamp = nameData.startTimestamp, + processId = arns.getProcessIdForRecord(utils.baseNameForName(nameData.name)), + } + end + + --- Complete name resolution, returning the owner and name data for a name + --- @param name string + --- @return PrimaryNameInfo|nil - the primary name with owner data and processId, or nil if it does not exist + function primaryNames.getPrimaryNameDataWithOwnerFromName(name) + local owner = primaryNames.getAddressForPrimaryName(name) + if not owner then + return nil + end + local nameData = primaryNames.getPrimaryNameDataWithOwnerFromAddress(owner) + if not nameData then + return nil + end + return nameData + end + + ---Finds all primary names with a given base name + --- @param baseName string -- the base name to find primary names for (e.g. 
"test" to find "undername_test") + --- @return PrimaryNameWithOwner[] primaryNamesForArNSName - the primary names with owner data + function primaryNames.getPrimaryNamesForBaseName(baseName) + local primaryNamesForArNSName = {} + for name, _ in pairs(primaryNames.getUnsafePrimaryNames()) do + local nameData = primaryNames.getPrimaryNameDataWithOwnerFromName(name) + if nameData and utils.baseNameForName(name) == baseName then + table.insert(primaryNamesForArNSName, nameData) + end + end + -- sort by name length + table.sort(primaryNamesForArNSName, function(a, b) + return #a.name < #b.name + end) + return primaryNamesForArNSName + end + + --- @class RemovedPrimaryName + --- @field owner WalletAddress + --- @field name ArNSName + + --- Remove all primary names with a given base name + --- @param baseName string + --- @return RemovedPrimaryName[] removedPrimaryNames - the results of the name removals + function primaryNames.removePrimaryNamesForBaseName(baseName) + local removedNames = {} + local primaryNamesForBaseName = primaryNames.getPrimaryNamesForBaseName(baseName) + for _, nameData in pairs(primaryNamesForBaseName) do + local removedName = primaryNames.removePrimaryName(nameData.name, nameData.owner) + table.insert(removedNames, removedName) + end + return removedNames + end + + --- Get paginated primary names + --- @param cursor string|nil + --- @param limit number + --- @param sortBy string + --- @param sortOrder string + --- @return PaginatedTable paginatedPrimaryNames - the paginated primary names + function primaryNames.getPaginatedPrimaryNames(cursor, limit, sortBy, sortOrder) + local primaryNamesArray = {} + local cursorField = "name" + for owner, primaryName in pairs(primaryNames.getUnsafePrimaryNameOwners()) do + table.insert(primaryNamesArray, { + name = primaryName.name, + owner = owner, + startTimestamp = primaryName.startTimestamp, + processId = arns.getProcessIdForRecord(utils.baseNameForName(primaryName.name)), + }) + end + return 
	--- Get paginated primary name requests
	--- @param cursor string|nil
	--- @param limit number
	--- @param sortBy string
	--- @param sortOrder string
	--- @return PaginatedTable paginatedPrimaryNameRequests - the paginated primary name requests
	function primaryNames.getPaginatedPrimaryNameRequests(cursor, limit, sortBy, sortOrder)
		local primaryNameRequestsArray = {}
		local cursorField = "initiator"
		-- flatten the requests map into rows, tagging each with its initiator for cursoring
		for initiator, request in pairs(primaryNames.getUnsafePrimaryNameRequests()) do
			table.insert(primaryNameRequestsArray, {
				name = request.name,
				startTimestamp = request.startTimestamp,
				endTimestamp = request.endTimestamp,
				initiator = initiator,
			})
		end
		return utils.paginateTableWithCursor(primaryNameRequestsArray, cursor, cursorField, limit, sortBy, sortOrder)
	end

	--- Prune expired primary name requests
	--- @param timestamp number
	--- @return table prunedNameClaims - the names of the requests that were pruned
	function primaryNames.prunePrimaryNameRequests(timestamp)
		local prunedNameRequests = {}
		-- NextPrimaryNamesPruneTimestamp tracks the earliest known request expiration;
		-- if it is unset or still in the future, nothing can be expired yet
		if not NextPrimaryNamesPruneTimestamp or timestamp < NextPrimaryNamesPruneTimestamp then
			-- No known requests to prune
			return prunedNameRequests
		end

		-- reset the next prune timestamp, below will populate it with the next prune timestamp minimum
		NextPrimaryNamesPruneTimestamp = nil

		-- clearing existing keys (setting to nil) during pairs() iteration is allowed in Lua
		for initiator, request in pairs(primaryNames.getUnsafePrimaryNameRequests()) do
			if request.endTimestamp <= timestamp then
				PrimaryNames.requests[initiator] = nil
				prunedNameRequests[initiator] = request
			else
				-- still-live request: its expiration becomes a candidate for the next prune time
				primaryNames.scheduleNextPrimaryNamesPruning(request.endTimestamp)
			end
		end
		return prunedNameRequests
	end

	--- Records that a request expires at `timestamp`, keeping the earliest pending expiration
	--- @param timestamp Timestamp
	function primaryNames.scheduleNextPrimaryNamesPruning(timestamp)
		-- math.min with the `or timestamp` fallback handles the initially-nil case
		NextPrimaryNamesPruneTimestamp = math.min(NextPrimaryNamesPruneTimestamp or timestamp, timestamp)
	end

	--- @return Timestamp|nil -- the next scheduled primary-name prune timestamp, if any
	function primaryNames.nextPrimaryNamesPruneTimestamp()
		return NextPrimaryNamesPruneTimestamp
	end
-- module: ".src.prune"
local function _loaded_mod_src_prune()
	local arns = require(".src.arns")
	local gar = require(".src.gar")
	local vaults = require(".src.vaults")
	local primaryNames = require(".src.primary_names")
	local prune = {}

	---@class PruneStateResult
	---@field prunedRecords table
	---@field newGracePeriodRecords table
	---@field prunedReturnedNames table
	---@field prunedReserved table
	---@field prunedVaults table
	---@field pruneGatewaysResult table
	---@field prunedPrimaryNamesAndOwners table
	---@field prunedPrimaryNameRequests table
	---@field delegatorsWithFeeReset WalletAddress[]

	--- Prunes the state: expired records, primary names/requests, returned names,
	--- reserved names, vaults, gateways, and redelegation fee data, in that order.
	--- @param timestamp number The timestamp
	--- @param msgId string The message ID
	--- @param lastGracePeriodEntryEndTimestamp number|nil The end timestamp of the last known record to enter grace period
	--- @return PruneStateResult pruneStateResult - the result of the state pruning
	function prune.pruneState(timestamp, msgId, lastGracePeriodEntryEndTimestamp)
		local prunedRecords, newGracePeriodRecords = arns.pruneRecords(timestamp, lastGracePeriodEntryEndTimestamp)
		-- for all the pruned records, create returned names and remove primary name claims
		local prunedPrimaryNamesAndOwners = {}
		for name, _ in pairs(prunedRecords) do
			-- remove primary names tied to the expired base name
			local removedPrimaryNamesAndOwners = primaryNames.removePrimaryNamesForBaseName(name)
			if #removedPrimaryNamesAndOwners > 0 then
				prunedPrimaryNamesAndOwners[name] = removedPrimaryNamesAndOwners
			end
			-- create returned names for records that have finally expired
			arns.createReturnedName(name, timestamp, ao.id)
		end
		local prunedPrimaryNameRequests = primaryNames.prunePrimaryNameRequests(timestamp)
		local prunedReturnedNames = arns.pruneReturnedNames(timestamp)
		local prunedReserved = arns.pruneReservedNames(timestamp)
		local prunedVaults = vaults.pruneVaults(timestamp)
		local pruneGatewaysResult = gar.pruneGateways(timestamp, msgId)
		local delegatorsWithFeeReset = gar.pruneRedelegationFeeData(timestamp)

		return {
			prunedRecords = prunedRecords,
			newGracePeriodRecords = newGracePeriodRecords,
			prunedReturnedNames = prunedReturnedNames,
			prunedReserved = prunedReserved,
			prunedVaults = prunedVaults,
			pruneGatewaysResult = pruneGatewaysResult,
			prunedPrimaryNamesAndOwners = prunedPrimaryNamesAndOwners,
			prunedPrimaryNameRequests = prunedPrimaryNameRequests,
			delegatorsWithFeeReset = delegatorsWithFeeReset,
		}
	end

	return prune
end

_G.package.loaded[".src.prune"] = _loaded_mod_src_prune()
	--- Ticks an epoch. A tick is the process of updating the demand factor, distributing rewards, pruning gateways, and creating a new epoch.
	--- @param currentTimestamp number The current timestamp
	--- @param currentBlockHeight number The current block height
	--- @param currentHashchain string The current hashchain
	--- @param currentMsgId string The current message ID
	--- @param epochIndexToTick number The epoch index to tick
	--- @return TickResult # The ticked epoch
	function tick.tickEpoch(currentTimestamp, currentBlockHeight, currentHashchain, currentMsgId, epochIndexToTick)
		-- nothing to tick before the genesis epoch begins
		if currentTimestamp < epochs.getSettings().epochZeroStartTimestamp then
			print("Genesis epoch has not started yet, skipping tick")
			-- NOTE(review): assigning nil in a table constructor is a no-op, so this is an empty
			-- table; pruneGatewaysResult (declared on TickResult) is absent on this path — confirm
			-- callers tolerate that
			return {
				maybeNewEpoch = nil,
				maybePrescribedEpoch = nil,
				maybeDistributedEpoch = nil,
			}
		end
		-- distribute rewards for the epoch and increments stats for gateways, this closes the epoch if the timestamp is greater than the epochs required distribution timestamp
		local distributedEpoch = epochs.distributeEpoch(epochIndexToTick, currentTimestamp)
		-- prune any gateway that has hit the failed 30 consecutive epoch threshold after the epoch has been distributed
		local pruneGatewaysResult = gar.pruneGateways(currentTimestamp, currentMsgId)
		-- now create the new epoch with the current message hashchain and block height
		local newPrescribedEpoch =
			epochs.createAndPrescribeNewEpoch(currentTimestamp, currentBlockHeight, currentHashchain)
		return {
			maybeDistributedEpoch = distributedEpoch,
			maybeNewEpoch = newPrescribedEpoch,
			pruneGatewaysResult = pruneGatewaysResult,
		}
	end
+ --- @field addFields fun(self: AOEvent, fields: table): AOEvent Adds multiple fields to the event. + --- @field addFieldsIfExist fun(self: AOEvent, table: table|nil, fields: table): AOEvent Adds specific fields if they exist in the given table. + --- @field addFieldsWithPrefixIfExist fun(self: AOEvent, srcTable: table, prefix: string, fields: table): AOEvent + --- Adds fields with a prefix if they exist in the source table. + --- @field printEvent fun(self: AOEvent): nil Prints the event in JSON format. + --- @field toJSON fun(self: AOEvent): string Converts the event to a JSON string. + + --- Factory function for creating an "AOEvent" + --- @param initialData table Optional initial data to populate the event with. + --- @returns AOEvent + local function AOEvent(initialData) + local event = { + sampleRate = nil, -- Optional sample rate + } + + if type(initialData) ~= "table" then + print("ERROR: AOEvent data must be a table.") + event.data = {} + else + event.data = initialData + end + + local function isValidTableValueType(value) + local valueType = type(value) + return valueType == "string" or valueType == "number" or valueType == "boolean" or value == nil + end + + local function isValidType(value) + local valueType = type(value) + if isValidTableValueType(value) then + return true + elseif valueType == "table" then + -- Prevent nested tables + for _, v in pairs(value) do + if not isValidTableValueType(v) then + return false + end + end + return true + end + return false + end + + --- Add a field to the event + --- @param key string The key to add to the event. + --- @param value any The value to add to the event. + --- @param trainCase boolean|nil Whether to convert the key to Train Case. Defaults to true. 
+ function event:addField(key, value, trainCase) + trainCase = trainCase ~= false -- default to true unless explicitly set to false + if type(key) ~= "string" then + print("ERROR: Field key must be a string.") + return self + end + if not isValidType(value) then + print( + "ERROR: Invalid field value type for key (" + .. key + .. "): " + .. type(value) + .. ". Supported types are string, number, boolean, or nil." + ) + if type(value) == "table" then + print("Invalid field value: " .. json.encode(value)) + end + return self + end + self.data[trainCase and utils.toTrainCase(key) or key] = value + return self + end + + function event:addFields(fields) + if type(fields) ~= "table" then + print("ERROR: Fields must be provided as a table.") + return self + end + for key, value in pairs(fields) do + self:addField(key, value) + end + return self + end + + function event:addFieldsIfExist(table, fields) + table = table == nil and {} or table -- allow for nil OR a table, but not other falsey value types + if type(table) ~= "table" then + print("ERROR: Table and fields must be provided as tables.") + return self + end + for _, key in pairs(fields) do + if table[key] then + self:addField(key, table[key]) + end + end + return self + end + + function event:addFieldsWithPrefixIfExist(srcTable, prefix, fields) + srcTable = srcTable == nil and {} or srcTable -- allow for nil OR a table, but not other falsey value types + if type(srcTable) ~= "table" or type(fields) ~= "table" then + print("ERROR: table and fields must be provided as a table.") + return self + end + for _, key in pairs(fields) do + if srcTable[key] ~= nil then + self:addField(prefix .. key, srcTable[key]) + end + end + return self + end + + function event:printEvent() + print(self:toJSON()) + end + + function event:toJSON() + -- The _e: 1 flag signifies that this is an event. Ensure it is set. 
+ self.data["_e"] = 1 + return json.encode(self.data) + end + + return event + end + + -- Return the AOEvent function to make it accessible from other files + return AOEvent +end + +_G.package.loaded[".src.ao_event"] = _loaded_mod_src_ao_event() + +-- module: ".src.ario_event" +local function _loaded_mod_src_ario_event() + local AOEvent = require(".src.ao_event") + local utils = require(".src.utils") + + --- @alias ARIOEvent AOEvent + + --- Convenience factory function for pre populating analytic and msg fields into AOEvents + --- @param msg table + --- @param initialData table | nil Optional initial data to populate the event with. + --- @returns ARIOEvent + local function ARIOEvent(msg, initialData) + local event = AOEvent({ + Cron = msg.Cron or false, + Cast = msg.Cast or false, + }) + event:addFields(msg.Tags or {}) + event:addFieldsIfExist(msg, { "From", "Timestamp", "Action" }) + event:addField("Message-Id", msg.Id) + event:addField("From-Formatted", utils.formatAddress(msg.From)) + event:addField("Memory-KiB-Used", collectgarbage("count"), false) + if initialData ~= nil then + event:addFields(initialData) + end + return event + end + + return ARIOEvent +end + +_G.package.loaded[".src.ario_event"] = _loaded_mod_src_ario_event() + +-- module: ".src.main" +local function _loaded_mod_src_main() + local main = {} + local constants = require(".src.constants") + local token = require(".src.token") + local utils = require(".src.utils") + local json = require(".src.json") + local balances = require(".src.balances") + local arns = require(".src.arns") + local gar = require(".src.gar") + local demand = require(".src.demand") + local epochs = require(".src.epochs") + local vaults = require(".src.vaults") + local prune = require(".src.prune") + local tick = require(".src.tick") + local primaryNames = require(".src.primary_names") + local ARIOEvent = require(".src.ario_event") + + -- handlers that are critical should discard the memory on error (see prune for an example) 
+ local CRITICAL = true + + local ActionMap = { + -- reads + Info = "Info", + TotalSupply = "Total-Supply", -- for token.lua spec compatibility, gives just the total supply (circulating + locked + staked + delegated + withdraw) + TotalTokenSupply = "Total-Token-Supply", -- gives the total token supply and all components (protocol balance, locked supply, staked supply, delegated supply, and withdraw supply) + Transfer = "Transfer", + Balance = "Balance", + Balances = "Balances", + DemandFactor = "Demand-Factor", + DemandFactorInfo = "Demand-Factor-Info", + DemandFactorSettings = "Demand-Factor-Settings", + -- EPOCH READ APIS + Epoch = "Epoch", + EpochSettings = "Epoch-Settings", + PrescribedObservers = "Epoch-Prescribed-Observers", + PrescribedNames = "Epoch-Prescribed-Names", + Observations = "Epoch-Observations", + Distributions = "Epoch-Distributions", + EpochRewards = "Epoch-Eligible-Rewards", + --- Vaults + Vault = "Vault", + Vaults = "Vaults", + CreateVault = "Create-Vault", + VaultedTransfer = "Vaulted-Transfer", + ExtendVault = "Extend-Vault", + IncreaseVault = "Increase-Vault", + RevokeVault = "Revoke-Vault", + -- GATEWAY REGISTRY READ APIS + Gateway = "Gateway", + Gateways = "Gateways", + GatewayRegistrySettings = "Gateway-Registry-Settings", + Delegates = "Delegates", + JoinNetwork = "Join-Network", + LeaveNetwork = "Leave-Network", + IncreaseOperatorStake = "Increase-Operator-Stake", + DecreaseOperatorStake = "Decrease-Operator-Stake", + UpdateGatewaySettings = "Update-Gateway-Settings", + SaveObservations = "Save-Observations", + DelegateStake = "Delegate-Stake", + RedelegateStake = "Redelegate-Stake", + DecreaseDelegateStake = "Decrease-Delegate-Stake", + CancelWithdrawal = "Cancel-Withdrawal", + InstantWithdrawal = "Instant-Withdrawal", + RedelegationFee = "Redelegation-Fee", + AllPaginatedDelegates = "All-Paginated-Delegates", + AllGatewayVaults = "All-Gateway-Vaults", + --- ArNS + Record = "Record", + Records = "Records", + BuyName = "Buy-Name", + 
UpgradeName = "Upgrade-Name", + ExtendLease = "Extend-Lease", + IncreaseUndernameLimit = "Increase-Undername-Limit", + ReassignName = "Reassign-Name", + ReleaseName = "Release-Name", + ReservedNames = "Reserved-Names", + ReservedName = "Reserved-Name", + TokenCost = "Token-Cost", + CostDetails = "Cost-Details", + RegistrationFees = "Registration-Fees", + ReturnedNames = "Returned-Names", + ReturnedName = "Returned-Name", + AllowDelegates = "Allow-Delegates", + DisallowDelegates = "Disallow-Delegates", + Delegations = "Delegations", + -- PRIMARY NAMES + RemovePrimaryNames = "Remove-Primary-Names", + RequestPrimaryName = "Request-Primary-Name", + PrimaryNameRequest = "Primary-Name-Request", + PrimaryNameRequests = "Primary-Name-Requests", + ApprovePrimaryNameRequest = "Approve-Primary-Name-Request", + PrimaryNames = "Primary-Names", + PrimaryName = "Primary-Name", + } + + --- @param msg ParsedMessage + --- @param response any + local function Send(msg, response) + if msg.reply then + --- Reference: https://github.com/permaweb/aos/blob/main/blueprints/patch-legacy-reply.lua + msg.reply(response) + else + ao.send(response) + end + end + + local function eventingPcall(ioEvent, onError, fnToCall, ...) + local status, result = pcall(fnToCall, ...) 
+ if not status then + onError(result) + ioEvent:addField("Error", result) + return status, result + end + return status, result + end + + --- @param fundingPlan FundingPlan|nil + --- @param rewardForInitiator number|nil only applies in buy record for returned names + local function adjustSuppliesForFundingPlan(fundingPlan, rewardForInitiator) + if not fundingPlan then + return + end + rewardForInitiator = rewardForInitiator or 0 + local totalActiveStakesUsed = utils.reduce(fundingPlan.stakes, function(acc, _, stakeSpendingPlan) + return acc + stakeSpendingPlan.delegatedStake + end, 0) + local totalWithdrawStakesUsed = utils.reduce(fundingPlan.stakes, function(acc, _, stakeSpendingPlan) + return acc + + utils.reduce(stakeSpendingPlan.vaults, function(acc2, _, vaultBalance) + return acc2 + vaultBalance + end, 0) + end, 0) + LastKnownStakedSupply = LastKnownStakedSupply - totalActiveStakesUsed + LastKnownWithdrawSupply = LastKnownWithdrawSupply - totalWithdrawStakesUsed + LastKnownCirculatingSupply = LastKnownCirculatingSupply - fundingPlan.balance + rewardForInitiator + end + + --- @param ioEvent ARIOEvent + --- @param result BuyNameResult|RecordInteractionResult|CreatePrimaryNameResult|PrimaryNameRequestApproval + local function addResultFundingPlanFields(ioEvent, result) + ioEvent:addFieldsWithPrefixIfExist(result.fundingPlan, "FP-", { "balance" }) + local fundingPlanVaultsCount = 0 + local fundingPlanStakesAmount = utils.reduce( + result.fundingPlan and result.fundingPlan.stakes or {}, + function(acc, _, delegation) + return acc + + delegation.delegatedStake + + utils.reduce(delegation.vaults, function(acc2, _, vaultAmount) + fundingPlanVaultsCount = fundingPlanVaultsCount + 1 + return acc2 + vaultAmount + end, 0) + end, + 0 + ) + if fundingPlanStakesAmount > 0 then + ioEvent:addField("FP-Stakes-Amount", fundingPlanStakesAmount) + end + if fundingPlanVaultsCount > 0 then + ioEvent:addField("FP-Vaults-Count", fundingPlanVaultsCount) + end + local 
newWithdrawVaultsTallies = utils.reduce( + result.fundingResult and result.fundingResult.newWithdrawVaults or {}, + function(acc, _, newWithdrawVault) + acc.totalBalance = acc.totalBalance + + utils.reduce(newWithdrawVault, function(acc2, _, vault) + acc.count = acc.count + 1 + return acc2 + vault.balance + end, 0) + return acc + end, + { count = 0, totalBalance = 0 } + ) + if newWithdrawVaultsTallies.count > 0 then + ioEvent:addField("New-Withdraw-Vaults-Count", newWithdrawVaultsTallies.count) + ioEvent:addField("New-Withdraw-Vaults-Total-Balance", newWithdrawVaultsTallies.totalBalance) + end + adjustSuppliesForFundingPlan(result.fundingPlan, result.returnedName and result.returnedName.rewardForInitiator) + end + + --- @param ioEvent ARIOEvent + ---@param result RecordInteractionResult|BuyNameResult + local function addRecordResultFields(ioEvent, result) + ioEvent:addFieldsIfExist(result, { + "baseRegistrationFee", + "remainingBalance", + "protocolBalance", + "recordsCount", + "reservedRecordsCount", + "totalFee", + }) + ioEvent:addFieldsIfExist(result.record, { "startTimestamp", "endTimestamp", "undernameLimit", "purchasePrice" }) + if result.df ~= nil and type(result.df) == "table" then + ioEvent:addField("DF-Trailing-Period-Purchases", (result.df.trailingPeriodPurchases or {})) + ioEvent:addField("DF-Trailing-Period-Revenues", (result.df.trailingPeriodRevenues or {})) + ioEvent:addFieldsWithPrefixIfExist(result.df, "DF-", { + "currentPeriod", + "currentDemandFactor", + "consecutivePeriodsWithMinDemandFactor", + "revenueThisPeriod", + "purchasesThisPeriod", + }) + end + addResultFundingPlanFields(ioEvent, result) + end + + local function addReturnedNameResultFields(ioEvent, result) + ioEvent:addFieldsIfExist(result, { + "rewardForInitiator", + "rewardForProtocol", + "type", + "years", + }) + ioEvent:addFieldsIfExist(result.record, { "startTimestamp", "endTimestamp", "undernameLimit", "purchasePrice" }) + ioEvent:addFieldsIfExist(result.returnedName, { + "name", 
+ "initiator", + "startTimestamp", + }) + -- TODO: add removedPrimaryNamesAndOwners to ioEvent + addResultFundingPlanFields(ioEvent, result) + end + + --- @class SupplyData + --- @field circulatingSupply number|nil + --- @field lockedSupply number|nil + --- @field stakedSupply number|nil + --- @field delegatedSupply number|nil + --- @field withdrawSupply number|nil + --- @field totalTokenSupply number|nil + --- @field protocolBalance number|nil + + --- @param ioEvent ARIOEvent + --- @param supplyData SupplyData|nil + local function addSupplyData(ioEvent, supplyData) + supplyData = supplyData or {} + ioEvent:addField("Circulating-Supply", supplyData.circulatingSupply or LastKnownCirculatingSupply) + ioEvent:addField("Locked-Supply", supplyData.lockedSupply or LastKnownLockedSupply) + ioEvent:addField("Staked-Supply", supplyData.stakedSupply or LastKnownStakedSupply) + ioEvent:addField("Delegated-Supply", supplyData.delegatedSupply or LastKnownDelegatedSupply) + ioEvent:addField("Withdraw-Supply", supplyData.withdrawSupply or LastKnownWithdrawSupply) + ioEvent:addField("Total-Token-Supply", supplyData.totalTokenSupply or token.lastKnownTotalTokenSupply()) + ioEvent:addField("Protocol-Balance", Balances[ao.id]) + end + + --- @param ioEvent ARIOEvent + --- @param talliesData StateObjectTallies|GatewayObjectTallies|nil + local function addTalliesData(ioEvent, talliesData) + ioEvent:addFieldsIfExist(talliesData, { + "numAddressesVaulting", + "numBalanceVaults", + "numBalances", + "numDelegateVaults", + "numDelegatesVaulting", + "numDelegates", + "numDelegations", + "numExitingDelegations", + "numGatewayVaults", + "numGatewaysVaulting", + "numGateways", + "numExitingGateways", + }) + end + + local function gatewayStats() + local numJoinedGateways = 0 + local numLeavingGateways = 0 + for _, gateway in pairs(GatewayRegistry) do + if gateway.status == "joined" then + numJoinedGateways = numJoinedGateways + 1 + else + numLeavingGateways = numLeavingGateways + 1 + end + end + 
return { + joined = numJoinedGateways, + leaving = numLeavingGateways, + } + end + + --- @param ioEvent ARIOEvent + --- @param pruneGatewaysResult PruneGatewaysResult + local function addPruneGatewaysResult(ioEvent, pruneGatewaysResult) + LastKnownCirculatingSupply = LastKnownCirculatingSupply + + (pruneGatewaysResult.delegateStakeReturned or 0) + + (pruneGatewaysResult.gatewayStakeReturned or 0) + + LastKnownWithdrawSupply = LastKnownWithdrawSupply + - (pruneGatewaysResult.delegateStakeReturned or 0) + - (pruneGatewaysResult.gatewayStakeReturned or 0) + + (pruneGatewaysResult.delegateStakeWithdrawing or 0) + + (pruneGatewaysResult.gatewayStakeWithdrawing or 0) + + LastKnownDelegatedSupply = LastKnownDelegatedSupply - (pruneGatewaysResult.delegateStakeWithdrawing or 0) + + local totalGwStakesSlashed = (pruneGatewaysResult.stakeSlashed or 0) + LastKnownStakedSupply = LastKnownStakedSupply + - totalGwStakesSlashed + - (pruneGatewaysResult.gatewayStakeWithdrawing or 0) + + if totalGwStakesSlashed > 0 then + ioEvent:addField("Total-Gateways-Stake-Slashed", totalGwStakesSlashed) + end + + local prunedGateways = pruneGatewaysResult.prunedGateways or {} + local prunedGatewaysCount = utils.lengthOfTable(prunedGateways) + if prunedGatewaysCount > 0 then + ioEvent:addField("Pruned-Gateways", prunedGateways) + ioEvent:addField("Pruned-Gateways-Count", prunedGatewaysCount) + local gwStats = gatewayStats() + ioEvent:addField("Joined-Gateways-Count", gwStats.joined) + ioEvent:addField("Leaving-Gateways-Count", gwStats.leaving) + end + + local slashedGateways = pruneGatewaysResult.slashedGateways or {} + local slashedGatewaysCount = utils.lengthOfTable(slashedGateways or {}) + if slashedGatewaysCount > 0 then + ioEvent:addField("Slashed-Gateway-Amounts", slashedGateways) + ioEvent:addField("Slashed-Gateways-Count", slashedGatewaysCount) + local invariantSlashedGateways = {} + for gwAddress, _ in pairs(slashedGateways) do + local unsafeGateway = gar.getGatewayUnsafe(gwAddress) or 
{} + if unsafeGateway and (unsafeGateway.totalDelegatedStake > 0) then + invariantSlashedGateways[gwAddress] = unsafeGateway.totalDelegatedStake + end + end + if utils.lengthOfTable(invariantSlashedGateways) > 0 then + ioEvent:addField("Invariant-Slashed-Gateways", invariantSlashedGateways) + end + end + + addTalliesData(ioEvent, pruneGatewaysResult.gatewayObjectTallies) + end + + --- @param ioEvent ARIOEvent + local function addNextPruneTimestampsData(ioEvent) + ioEvent:addField("Next-Returned-Names-Prune-Timestamp", arns.nextReturnedNamesPruneTimestamp()) + ioEvent:addField("Next-Records-Prune-Timestamp", arns.nextRecordsPruneTimestamp()) + ioEvent:addField("Next-Vaults-Prune-Timestamp", vaults.nextVaultsPruneTimestamp()) + ioEvent:addField("Next-Gateways-Prune-Timestamp", gar.nextGatewaysPruneTimestamp()) + ioEvent:addField("Next-Redelegations-Prune-Timestamp", gar.nextRedelegationsPruneTimestamp()) + ioEvent:addField("Next-Primary-Names-Prune-Timestamp", primaryNames.nextPrimaryNamesPruneTimestamp()) + end + + --- @param ioEvent ARIOEvent + --- @param prunedStateResult PruneStateResult + local function addNextPruneTimestampsResults(ioEvent, prunedStateResult) + --- @type PruneGatewaysResult + local pruneGatewaysResult = prunedStateResult.pruneGatewaysResult + + -- If anything meaningful was pruned, collect the next prune timestamps + if + next(prunedStateResult.prunedReturnedNames) + or next(prunedStateResult.prunedPrimaryNameRequests) + or next(prunedStateResult.prunedRecords) + or next(pruneGatewaysResult.prunedGateways) + or next(prunedStateResult.delegatorsWithFeeReset) + or next(pruneGatewaysResult.slashedGateways) + or pruneGatewaysResult.delegateStakeReturned > 0 + or pruneGatewaysResult.gatewayStakeReturned > 0 + or pruneGatewaysResult.delegateStakeWithdrawing > 0 + or pruneGatewaysResult.gatewayStakeWithdrawing > 0 + or pruneGatewaysResult.stakeSlashed > 0 + then + addNextPruneTimestampsData(ioEvent) + end + end + + local function 
assertValidFundFrom(fundFrom) + if fundFrom == nil then + return + end + local validFundFrom = utils.createLookupTable({ "any", "balance", "stakes" }) + assert(validFundFrom[fundFrom], "Invalid fund from type. Must be one of: any, balance, stakes") + end + + --- @param ioEvent ARIOEvent + local function addPrimaryNameCounts(ioEvent) + ioEvent:addField("Total-Primary-Names", utils.lengthOfTable(primaryNames.getUnsafePrimaryNames())) + ioEvent:addField( + "Total-Primary-Name-Requests", + utils.lengthOfTable(primaryNames.getUnsafePrimaryNameRequests()) + ) + end + + --- @param ioEvent ARIOEvent + --- @param primaryNameResult CreatePrimaryNameResult|PrimaryNameRequestApproval + local function addPrimaryNameRequestData(ioEvent, primaryNameResult) + ioEvent:addFieldsIfExist(primaryNameResult, { "baseNameOwner" }) + ioEvent:addFieldsIfExist(primaryNameResult.newPrimaryName, { "owner", "startTimestamp" }) + ioEvent:addFieldsWithPrefixIfExist(primaryNameResult.request, "Request-", { "startTimestamp", "endTimestamp" }) + addResultFundingPlanFields(ioEvent, primaryNameResult) + addPrimaryNameCounts(ioEvent) + + -- demand factor data + if primaryNameResult.demandFactor and type(primaryNameResult.demandFactor) == "table" then + ioEvent:addField( + "DF-Trailing-Period-Purchases", + (primaryNameResult.demandFactor.trailingPeriodPurchases or {}) + ) + ioEvent:addField( + "DF-Trailing-Period-Revenues", + (primaryNameResult.demandFactor.trailingPeriodRevenues or {}) + ) + ioEvent:addFieldsWithPrefixIfExist(primaryNameResult.demandFactor, "DF-", { + "currentPeriod", + "currentDemandFactor", + "consecutivePeriodsWithMinDemandFactor", + "revenueThisPeriod", + "purchasesThisPeriod", + }) + end + end + + local function assertValueBytesLowerThan(value, remainingBytes, tablesSeen) + tablesSeen = tablesSeen or {} + + local t = type(value) + if t == "string" then + remainingBytes = remainingBytes - #value + elseif t == "number" or t == "boolean" then + remainingBytes = remainingBytes - 8 -- 
Approximate size for numbers/booleans + elseif t == "table" and not tablesSeen[value] then + tablesSeen[value] = true + for k, v in pairs(value) do + remainingBytes = assertValueBytesLowerThan(k, remainingBytes, tablesSeen) + remainingBytes = assertValueBytesLowerThan(v, remainingBytes, tablesSeen) + end + end + + if remainingBytes <= 0 then + error("Data size is too large") + end + return remainingBytes + end + + -- Sanitize inputs before every interaction + local function assertAndSanitizeInputs(msg) + if msg.Tags.Action ~= "Eval" and msg.Data then + assertValueBytesLowerThan(msg.Data, 100) + end + + assert( + -- TODO: replace this with LastKnownMessageTimestamp after node release 23.0.0 + msg.Timestamp and tonumber(msg.Timestamp) >= 0, + "Timestamp must be greater than or equal to the last known message timestamp of " + .. LastKnownMessageTimestamp + .. " but was " + .. msg.Timestamp + ) + assert(msg.From, "From is required") + assert(msg.Tags and type(msg.Tags) == "table", "Tags are required") + + msg.Tags = utils.validateAndSanitizeInputs(msg.Tags) + msg.From = utils.formatAddress(msg.From) + msg.Timestamp = msg.Timestamp and tonumber(msg.Timestamp) -- Timestamp should always be provided by the CU + end + + local function updateLastKnownMessage(msg) + if msg.Timestamp >= LastKnownMessageTimestamp then + LastKnownMessageTimestamp = msg.Timestamp + LastKnownMessageId = msg.Id + end + end + + --- @class ParsedMessage + --- @field Id string + --- @field Action string + --- @field From string + --- @field Timestamp Timestamp + --- @field Tags table + --- @field ioEvent ARIOEvent + --- @field Cast boolean? + --- @field reply? fun(response: any) + + --- @param handlerName string + --- @param pattern fun(msg: ParsedMessage):'continue'|boolean + --- @param handleFn fun(msg: ParsedMessage) + --- @param critical boolean? + --- @param printEvent boolean? 
+ local function addEventingHandler(handlerName, pattern, handleFn, critical, printEvent) + critical = critical or false + printEvent = printEvent == nil and true or printEvent + Handlers.add(handlerName, pattern, function(msg) + -- add an ARIOEvent to the message if it doesn't exist + msg.ioEvent = msg.ioEvent or ARIOEvent(msg) + -- global handler for all eventing errors, so we can log them and send a notice to the sender for non critical errors and discard the memory on critical errors + local status, resultOrError = eventingPcall(msg.ioEvent, function(error) + --- non critical errors will send an invalid notice back to the caller with the error information, memory is not discarded + Send(msg, { + Target = msg.From, + Action = "Invalid-" .. utils.toTrainCase(handlerName) .. "-Notice", + Error = tostring(error), + Data = tostring(error), + }) + end, handleFn, msg) + if not status and critical then + local errorEvent = ARIOEvent(msg) + -- For critical handlers we want to make sure the event data gets sent to the CU for processing, but that the memory is discarded on failures + -- These handlers (distribute, prune) severely modify global state, and partial updates are dangerous. + -- So we json encode the error and the event data and then throw, so the CU will discard the memory and still process the event data. + -- An alternative approach is to modify the implementation of ao.result - to also return the Output on error. + -- Reference: https://github.com/permaweb/ao/blob/76a618722b201430a372894b3e2753ac01e63d3d/dev-cli/src/starters/lua/ao.lua#L284-L287 + local errorWithEvent = tostring(resultOrError) .. "\n" .. 
errorEvent:toJSON() + error(errorWithEvent, 0) -- 0 ensures not to include this line number in the error message + end + + msg.ioEvent:addField("Handler-Memory-KiB-Used", collectgarbage("count"), false) + collectgarbage("collect") + msg.ioEvent:addField("Final-Memory-KiB-Used", collectgarbage("count"), false) + + if printEvent then + msg.ioEvent:printEvent() + end + end) + end + + addEventingHandler("sanitize", function() + return "continue" + end, function(msg) + assertAndSanitizeInputs(msg) + updateLastKnownMessage(msg) + end, CRITICAL, false) + + -- NOTE: THIS IS A CRITICAL HANDLER AND WILL DISCARD THE MEMORY ON ERROR + addEventingHandler("prune", function() + return "continue" -- continue is a pattern that matches every message and continues to the next handler that matches the tags + end, function(msg) + local epochIndex = epochs.getEpochIndexForTimestamp(msg.Timestamp) + msg.ioEvent:addField("Epoch-Index", epochIndex) + + local previousStateSupplies = { + protocolBalance = Balances[ao.id], + lastKnownCirculatingSupply = LastKnownCirculatingSupply, + lastKnownLockedSupply = LastKnownLockedSupply, + lastKnownStakedSupply = LastKnownStakedSupply, + lastKnownDelegatedSupply = LastKnownDelegatedSupply, + lastKnownWithdrawSupply = LastKnownWithdrawSupply, + lastKnownTotalSupply = token.lastKnownTotalTokenSupply(), + } + + if msg.Tags["Force-Prune"] then + print("Force prune provided, resetting all prune timestamps") + gar.scheduleNextGatewaysPruning(0) + gar.scheduleNextRedelegationsPruning(0) + arns.scheduleNextReturnedNamesPrune(0) + arns.scheduleNextRecordsPrune(0) + primaryNames.scheduleNextPrimaryNamesPruning(0) + vaults.scheduleNextVaultsPruning(0) + end + + print("Pruning state at timestamp: " .. 
msg.Timestamp) + local prunedStateResult = prune.pruneState(msg.Timestamp, msg.Id, LastGracePeriodEntryEndTimestamp) + if prunedStateResult then + local prunedRecordsCount = utils.lengthOfTable(prunedStateResult.prunedRecords or {}) + if prunedRecordsCount > 0 then + local prunedRecordNames = {} + for name, _ in pairs(prunedStateResult.prunedRecords) do + table.insert(prunedRecordNames, name) + end + msg.ioEvent:addField("Pruned-Records", prunedRecordNames) + msg.ioEvent:addField("Pruned-Records-Count", prunedRecordsCount) + msg.ioEvent:addField("Records-Count", utils.lengthOfTable(NameRegistry.records)) + end + local newGracePeriodRecordsCount = utils.lengthOfTable(prunedStateResult.newGracePeriodRecords or {}) + if newGracePeriodRecordsCount > 0 then + local newGracePeriodRecordNames = {} + for name, record in pairs(prunedStateResult.newGracePeriodRecords) do + table.insert(newGracePeriodRecordNames, name) + if record.endTimestamp > LastGracePeriodEntryEndTimestamp then + LastGracePeriodEntryEndTimestamp = record.endTimestamp + end + end + msg.ioEvent:addField("New-Grace-Period-Records", newGracePeriodRecordNames) + msg.ioEvent:addField("New-Grace-Period-Records-Count", newGracePeriodRecordsCount) + msg.ioEvent:addField("Last-Grace-Period-Entry-End-Timestamp", LastGracePeriodEntryEndTimestamp) + end + local prunedReturnedNames = prunedStateResult.prunedReturnedNames or {} + local prunedReturnedNamesCount = utils.lengthOfTable(prunedReturnedNames) + if prunedReturnedNamesCount > 0 then + msg.ioEvent:addField("Pruned-Returned-Names", prunedReturnedNames) + msg.ioEvent:addField("Pruned-Returned-Name-Count", prunedReturnedNamesCount) + end + local prunedReserved = prunedStateResult.prunedReserved or {} + local prunedReservedCount = utils.lengthOfTable(prunedReserved) + if prunedReservedCount > 0 then + msg.ioEvent:addField("Pruned-Reserved", prunedReserved) + msg.ioEvent:addField("Pruned-Reserved-Count", prunedReservedCount) + end + local prunedVaultsCount = 
utils.lengthOfTable(prunedStateResult.prunedVaults or {}) + if prunedVaultsCount > 0 then + msg.ioEvent:addField("Pruned-Vaults", prunedStateResult.prunedVaults) + msg.ioEvent:addField("Pruned-Vaults-Count", prunedVaultsCount) + for _, vault in pairs(prunedStateResult.prunedVaults) do + LastKnownLockedSupply = LastKnownLockedSupply - vault.balance + LastKnownCirculatingSupply = LastKnownCirculatingSupply + vault.balance + end + end + + local pruneGatewaysResult = prunedStateResult.pruneGatewaysResult or {} + addPruneGatewaysResult(msg.ioEvent, pruneGatewaysResult) + + local prunedPrimaryNameRequests = prunedStateResult.prunedPrimaryNameRequests or {} + local prunedRequestsCount = utils.lengthOfTable(prunedPrimaryNameRequests) + if prunedRequestsCount > 0 then + msg.ioEvent:addField("Pruned-Requests-Count", prunedRequestsCount) + end + + addNextPruneTimestampsResults(msg.ioEvent, prunedStateResult) + end + + -- add supply data if it has changed since the last state + if + LastKnownCirculatingSupply ~= previousStateSupplies.lastKnownCirculatingSupply + or LastKnownLockedSupply ~= previousStateSupplies.lastKnownLockedSupply + or LastKnownStakedSupply ~= previousStateSupplies.lastKnownStakedSupply + or LastKnownDelegatedSupply ~= previousStateSupplies.lastKnownDelegatedSupply + or LastKnownWithdrawSupply ~= previousStateSupplies.lastKnownWithdrawSupply + or Balances[ao.id] ~= previousStateSupplies.protocolBalance + or token.lastKnownTotalTokenSupply() ~= previousStateSupplies.lastKnownTotalSupply + then + addSupplyData(msg.ioEvent) + end + end, CRITICAL, false) + + -- Write handlers + addEventingHandler(ActionMap.Transfer, utils.hasMatchingTag("Action", ActionMap.Transfer), function(msg) + -- assert recipient is a valid arweave address + local recipient = msg.Tags.Recipient + local quantity = msg.Tags.Quantity + local allowUnsafeAddresses = msg.Tags["Allow-Unsafe-Addresses"] or false + assert(utils.isValidAddress(recipient, allowUnsafeAddresses), "Invalid recipient") + 
assert( + quantity and quantity > 0 and utils.isInteger(quantity), + "Invalid quantity. Must be integer greater than 0" + ) + assert(recipient ~= msg.From, "Cannot transfer to self") + + msg.ioEvent:addField("RecipientFormatted", recipient) + + local result = balances.transfer(recipient, msg.From, quantity, allowUnsafeAddresses) + if result ~= nil then + local senderNewBalance = result[msg.From] + local recipientNewBalance = result[recipient] + msg.ioEvent:addField("SenderPreviousBalance", senderNewBalance + quantity) + msg.ioEvent:addField("SenderNewBalance", senderNewBalance) + msg.ioEvent:addField("RecipientPreviousBalance", recipientNewBalance - quantity) + msg.ioEvent:addField("RecipientNewBalance", recipientNewBalance) + end + + -- if the sender is the protocol, then we need to update the circulating supply as tokens are now in circulation + if msg.From == ao.id then + LastKnownCirculatingSupply = LastKnownCirculatingSupply + quantity + addSupplyData(msg.ioEvent) + end + + -- Casting implies that the sender does not want a response - Reference: https://elixirforum.com/t/what-is-the-etymology-of-genserver-cast/33610/3 + if not msg.Cast then + -- Debit-Notice message template, that is sent to the Sender of the transfer + local debitNotice = { + Target = msg.From, + Action = "Debit-Notice", + Recipient = recipient, + Quantity = tostring(quantity), + ["Allow-Unsafe-Addresses"] = tostring(allowUnsafeAddresses), + Data = "You transferred " .. msg.Tags.Quantity .. " to " .. recipient, + } + -- Credit-Notice message template, that is sent to the Recipient of the transfer + local creditNotice = { + Target = recipient, + Action = "Credit-Notice", + Sender = msg.From, + Quantity = tostring(quantity), + ["Allow-Unsafe-Addresses"] = tostring(allowUnsafeAddresses), + Data = "You received " .. msg.Tags.Quantity .. " from " .. 
msg.From, + } + + -- Add forwarded tags to the credit and debit notice messages + local didForwardTags = false + for tagName, tagValue in pairs(msg) do + -- Tags beginning with "X-" are forwarded + if string.sub(tagName, 1, 2) == "X-" then + debitNotice[tagName] = tagValue + creditNotice[tagName] = tagValue + didForwardTags = true + msg.ioEvent:addField(tagName, tagValue) + end + end + if didForwardTags then + msg.ioEvent:addField("ForwardedTags", "true") + end + + -- Send Debit-Notice and Credit-Notice + Send(msg, debitNotice) + Send(msg, creditNotice) + end + end) + + addEventingHandler(ActionMap.CreateVault, utils.hasMatchingTag("Action", ActionMap.CreateVault), function(msg) + local quantity = msg.Tags.Quantity + local lockLengthMs = msg.Tags["Lock-Length"] + local msgId = msg.Id + assert( + lockLengthMs and lockLengthMs > 0 and utils.isInteger(lockLengthMs), + "Invalid lock length. Must be integer greater than 0" + ) + assert( + quantity and utils.isInteger(quantity) and quantity >= constants.MIN_VAULT_SIZE, + "Invalid quantity. Must be integer greater than or equal to " .. constants.MIN_VAULT_SIZE .. " mARIO" + ) + local vault = vaults.createVault(msg.From, quantity, lockLengthMs, msg.Timestamp, msgId) + + if vault ~= nil then + msg.ioEvent:addField("Vault-Id", msgId) + msg.ioEvent:addField("Vault-Balance", vault.balance) + msg.ioEvent:addField("Vault-Start-Timestamp", vault.startTimestamp) + msg.ioEvent:addField("Vault-End-Timestamp", vault.endTimestamp) + end + + LastKnownLockedSupply = LastKnownLockedSupply + quantity + LastKnownCirculatingSupply = LastKnownCirculatingSupply - quantity + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { + Action = ActionMap.CreateVault .. 
"-Notice", + ["Vault-Id"] = msgId, + }, + Data = json.encode(vault), + }) + end) + + addEventingHandler( + ActionMap.VaultedTransfer, + utils.hasMatchingTag("Action", ActionMap.VaultedTransfer), + function(msg) + local recipient = msg.Tags.Recipient + local quantity = msg.Tags.Quantity + local lockLengthMs = msg.Tags["Lock-Length"] + local msgId = msg.Id + local allowUnsafeAddresses = msg.Tags["Allow-Unsafe-Addresses"] or false + local revokable = msg.Tags.Revokable or false + assert(utils.isValidAddress(recipient, allowUnsafeAddresses), "Invalid recipient") + assert( + lockLengthMs and lockLengthMs > 0 and utils.isInteger(lockLengthMs), + "Invalid lock length. Must be integer greater than 0" + ) + assert( + quantity and utils.isInteger(quantity) and quantity >= constants.MIN_VAULT_SIZE, + "Invalid quantity. Must be integer greater than or equal to " .. constants.MIN_VAULT_SIZE .. " mARIO" + ) + assert(recipient ~= msg.From, "Cannot transfer to self") + + local vault = vaults.vaultedTransfer( + msg.From, + recipient, + quantity, + lockLengthMs, + msg.Timestamp, + msgId, + allowUnsafeAddresses, + revokable + ) + + msg.ioEvent:addField("Vault-Id", msgId) + msg.ioEvent:addField("Vault-Balance", vault.balance) + msg.ioEvent:addField("Vault-Start-Timestamp", vault.startTimestamp) + msg.ioEvent:addField("Vault-End-Timestamp", vault.endTimestamp) + if revokable then + msg.ioEvent:addField("Vault-Controller", msg.From) + end + + LastKnownLockedSupply = LastKnownLockedSupply + quantity + LastKnownCirculatingSupply = LastKnownCirculatingSupply - quantity + addSupplyData(msg.ioEvent) + + -- sender gets an immediate debit notice as the quantity is debited from their balance + Send(msg, { + Target = msg.From, + Recipient = recipient, + Quantity = quantity, + Tags = { + Action = "Debit-Notice", + ["Vault-Id"] = msgId, + ["Allow-Unsafe-Addresses"] = tostring(allowUnsafeAddresses), + }, + Data = json.encode(vault), + }) + -- to the receiver, they get a vault notice + Send(msg, { + 
Target = recipient, + Quantity = quantity, + Sender = msg.From, + Tags = { + Action = ActionMap.CreateVault .. "-Notice", + ["Vault-Id"] = msgId, + ["Allow-Unsafe-Addresses"] = tostring(allowUnsafeAddresses), + }, + Data = json.encode(vault), + }) + end + ) + + addEventingHandler(ActionMap.RevokeVault, utils.hasMatchingTag("Action", ActionMap.RevokeVault), function(msg) + local vaultId = msg.Tags["Vault-Id"] + local recipient = msg.Tags.Recipient + assert(utils.isValidAddress(vaultId, true), "Invalid vault id") + assert(utils.isValidAddress(recipient, true), "Invalid recipient") + + local vault = vaults.revokeVault(msg.From, recipient, vaultId, msg.Timestamp) + + msg.ioEvent:addField("Vault-Id", vaultId) + msg.ioEvent:addField("Vault-Recipient", recipient) + msg.ioEvent:addField("Vault-Controller", vault.controller) + msg.ioEvent:addField("Vault-Balance", vault.balance) + msg.ioEvent:addField("Vault-Start-Timestamp", vault.startTimestamp) + msg.ioEvent:addField("Vault-End-Timestamp", vault.endTimestamp) + + LastKnownLockedSupply = LastKnownLockedSupply - vault.balance + LastKnownCirculatingSupply = LastKnownCirculatingSupply + vault.balance + addSupplyData(msg.ioEvent) + + -- to the controller, they get a credit notice + Send(msg, { + Target = msg.From, + Recipient = recipient, + Quantity = vault.balance, + Tags = { Action = "Credit-Notice", ["Vault-Id"] = vaultId }, + Data = json.encode(vault), + }) + + -- to the receiver, they get a revoke vault notice + Send(msg, { + Target = recipient, + Quantity = vault.balance, + Sender = msg.From, + Tags = { Action = ActionMap.RevokeVault .. 
"-Notice", ["Vault-Id"] = vaultId }, + Data = json.encode(vault), + }) + end) + + addEventingHandler(ActionMap.ExtendVault, utils.hasMatchingTag("Action", ActionMap.ExtendVault), function(msg) + local vaultId = msg.Tags["Vault-Id"] + local extendLengthMs = msg.Tags["Extend-Length"] + assert(utils.isValidAddress(vaultId, true), "Invalid vault id") + assert( + extendLengthMs and extendLengthMs > 0 and utils.isInteger(extendLengthMs), + "Invalid extension length. Must be integer greater than 0" + ) + + local vault = vaults.extendVault(msg.From, extendLengthMs, msg.Timestamp, vaultId) + + if vault ~= nil then + msg.ioEvent:addField("Vault-Id", vaultId) + msg.ioEvent:addField("Vault-Balance", vault.balance) + msg.ioEvent:addField("Vault-Start-Timestamp", vault.startTimestamp) + msg.ioEvent:addField("Vault-End-Timestamp", vault.endTimestamp) + msg.ioEvent:addField("Vault-Prev-End-Timestamp", vault.endTimestamp - extendLengthMs) + end + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.ExtendVault .. "-Notice" }, + Data = json.encode(vault), + }) + end) + + addEventingHandler(ActionMap.IncreaseVault, utils.hasMatchingTag("Action", ActionMap.IncreaseVault), function(msg) + local vaultId = msg.Tags["Vault-Id"] + local quantity = msg.Tags.Quantity + assert(utils.isValidAddress(vaultId, true), "Invalid vault id") + assert( + quantity and quantity > 0 and utils.isInteger(quantity), + "Invalid quantity. 
Must be integer greater than 0" + ) + + local vault = vaults.increaseVault(msg.From, quantity, vaultId, msg.Timestamp) + + if vault ~= nil then + msg.ioEvent:addField("Vault-Id", vaultId) + msg.ioEvent:addField("VaultBalance", vault.balance) + msg.ioEvent:addField("VaultPrevBalance", vault.balance - quantity) + msg.ioEvent:addField("VaultStartTimestamp", vault.startTimestamp) + msg.ioEvent:addField("VaultEndTimestamp", vault.endTimestamp) + end + + LastKnownLockedSupply = LastKnownLockedSupply + quantity + LastKnownCirculatingSupply = LastKnownCirculatingSupply - quantity + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.IncreaseVault .. "-Notice" }, + Data = json.encode(vault), + }) + end) + + addEventingHandler(ActionMap.BuyName, utils.hasMatchingTag("Action", ActionMap.BuyName), function(msg) + local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil + local purchaseType = msg.Tags["Purchase-Type"] and string.lower(msg.Tags["Purchase-Type"]) or "lease" + local years = msg.Tags.Years or nil + local processId = msg.Tags["Process-Id"] + local fundFrom = msg.Tags["Fund-From"] + local allowUnsafeProcessId = msg.Tags["Allow-Unsafe-Addresses"] + assert( + type(purchaseType) == "string" and purchaseType == "lease" or purchaseType == "permabuy", + "Invalid purchase type" + ) + arns.assertValidArNSName(name) + assert(utils.isValidAddress(processId, true), "Process Id must be a valid address.") + if years then + assert( + years >= 1 and years <= 5 and utils.isInteger(years), + "Invalid years. 
Must be integer between 1 and 5" + ) + end + assertValidFundFrom(fundFrom) + + msg.ioEvent:addField("Name-Length", #name) + + local result = arns.buyRecord( + name, + purchaseType, + years, + msg.From, + msg.Timestamp, + processId, + msg.Id, + fundFrom, + allowUnsafeProcessId + ) + local record = result.record + addRecordResultFields(msg.ioEvent, result) + addSupplyData(msg.ioEvent) + + msg.ioEvent:addField("Records-Count", utils.lengthOfTable(NameRegistry.records)) + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.BuyName .. "-Notice", Name = name }, + Data = json.encode({ + name = name, + startTimestamp = record.startTimestamp, + endTimestamp = record.endTimestamp, + undernameLimit = record.undernameLimit, + type = record.type, + purchasePrice = record.purchasePrice, + processId = record.processId, + fundingResult = fundFrom and result.fundingResult or nil, + fundingPlan = fundFrom and result.fundingPlan or nil, + baseRegistrationFee = result.baseRegistrationFee, + remainingBalance = result.remainingBalance, + returnedName = result.returnedName, + }), + }) + + -- If was returned name, send a credit notice to the initiator + if result.returnedName ~= nil then + Send(msg, { + Target = result.returnedName.initiator, + Action = "Credit-Notice", + Quantity = tostring(result.returnedName.rewardForInitiator), + Data = json.encode({ + name = name, + buyer = msg.From, + rewardForInitiator = result.returnedName.rewardForInitiator, + rewardForProtocol = result.returnedName.rewardForProtocol, + record = result.record, + }), + }) + end + end) + + addEventingHandler("upgradeName", utils.hasMatchingTag("Action", ActionMap.UpgradeName), function(msg) + local fundFrom = msg.Tags["Fund-From"] + local name = string.lower(msg.Tags.Name) + assert(type(name) == "string", "Invalid name") + assertValidFundFrom(fundFrom) + + local result = arns.upgradeRecord(msg.From, name, msg.Timestamp, msg.Id, fundFrom) + + local record = {} + if result ~= nil then + record = 
result.record + addRecordResultFields(msg.ioEvent, result) + addSupplyData(msg.ioEvent) + end + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.UpgradeName .. "-Notice", Name = name }, + Data = json.encode(fundFrom and result or { + name = name, + startTimestamp = record.startTimestamp, + endTimestamp = record.endTimestamp, + undernameLimit = record.undernameLimit, + purchasePrice = record.purchasePrice, + processId = record.processId, + type = record.type, + }), + }) + end) + + addEventingHandler(ActionMap.ExtendLease, utils.hasMatchingTag("Action", ActionMap.ExtendLease), function(msg) + local fundFrom = msg.Tags["Fund-From"] + local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil + local years = msg.Tags.Years + assert(type(name) == "string", "Invalid name") + assert( + years and years > 0 and years < 5 and utils.isInteger(years), + "Invalid years. Must be integer between 1 and 5" + ) + assertValidFundFrom(fundFrom) + local result = arns.extendLease(msg.From, name, years, msg.Timestamp, msg.Id, fundFrom) + local recordResult = {} + if result ~= nil then + addRecordResultFields(msg.ioEvent, result) + addSupplyData(msg.ioEvent) + recordResult = result.record + end + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.ExtendLease .. "-Notice", Name = name }, + Data = json.encode(fundFrom and result or recordResult), + }) + end) + + addEventingHandler( + ActionMap.IncreaseUndernameLimit, + utils.hasMatchingTag("Action", ActionMap.IncreaseUndernameLimit), + function(msg) + local fundFrom = msg.Tags["Fund-From"] + local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil + local quantity = msg.Tags.Quantity + assert(type(name) == "string", "Invalid name") + assert( + quantity and quantity > 0 and utils.isInteger(quantity), + "Invalid quantity. 
Must be an integer value greater than 0" + ) + assertValidFundFrom(fundFrom) + + local result = arns.increaseUndernameLimit(msg.From, name, quantity, msg.Timestamp, msg.Id, fundFrom) + local recordResult = {} + if result ~= nil then + recordResult = result.record + addRecordResultFields(msg.ioEvent, result) + msg.ioEvent:addField("Previous-Undername-Limit", recordResult.undernameLimit - msg.Tags.Quantity) + addSupplyData(msg.ioEvent) + end + + Send(msg, { + Target = msg.From, + Tags = { + Action = ActionMap.IncreaseUndernameLimit .. "-Notice", + Name = name, + }, + Data = json.encode(fundFrom and result or recordResult), + }) + end + ) + + function assertTokenCostTags(msg) + local intentType = msg.Tags.Intent + local validIntents = utils.createLookupTable({ + ActionMap.BuyName, + ActionMap.ExtendLease, + ActionMap.IncreaseUndernameLimit, + ActionMap.UpgradeName, + ActionMap.PrimaryNameRequest, + }) + assert( + intentType and type(intentType) == "string" and validIntents[intentType], + "Intent must be valid registry interaction (e.g. Buy-Name, Extend-Lease, Increase-Undername-Limit, Upgrade-Name, Primary-Name-Request). Provided intent: " + .. (intentType or "nil") + ) + if intentType == ActionMap.PrimaryNameRequest then + primaryNames.assertValidPrimaryName(msg.Tags.Name) + else + arns.assertValidArNSName(msg.Tags.Name) + end + + -- if years is provided, assert it is a number and integer between 1 and 5 + if msg.Tags.Years then + assert(utils.isInteger(msg.Tags.Years), "Invalid years. Must be integer") + assert(msg.Tags.Years > 0 and msg.Tags.Years < 6, "Invalid years. Must be between 1 and 5") + end + + -- if quantity provided must be a number and integer greater than 0 + if msg.Tags.Quantity then + assert(utils.isInteger(msg.Tags.Quantity), "Invalid quantity. Must be integer") + assert(msg.Tags.Quantity > 0, "Invalid quantity. 
Must be greater than 0") + end + end + + addEventingHandler(ActionMap.TokenCost, utils.hasMatchingTag("Action", ActionMap.TokenCost), function(msg) + assertTokenCostTags(msg) + local intent = msg.Tags.Intent + local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil + local years = msg.Tags.Years or nil + local quantity = msg.Tags.Quantity or nil + local purchaseType = msg.Tags["Purchase-Type"] or "lease" + + local intendedAction = { + intent = intent, + name = name, + years = years, + quantity = quantity, + purchaseType = purchaseType, + currentTimestamp = msg.Timestamp, + from = msg.From, + } + + local tokenCostResult = arns.getTokenCost(intendedAction) + local tokenCost = tokenCostResult.tokenCost + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.TokenCost .. "-Notice", ["Token-Cost"] = tostring(tokenCost) }, + Data = json.encode(tokenCost), + }) + end) + + addEventingHandler(ActionMap.CostDetails, utils.hasMatchingTag("Action", ActionMap.CostDetails), function(msg) + local fundFrom = msg.Tags["Fund-From"] + local name = string.lower(msg.Tags.Name) + local years = msg.Tags.Years or 1 + local quantity = msg.Tags.Quantity + local purchaseType = msg.Tags["Purchase-Type"] or "lease" + assertTokenCostTags(msg) + assertValidFundFrom(fundFrom) + + local tokenCostAndFundingPlan = arns.getTokenCostAndFundingPlanForIntent( + msg.Tags.Intent, + name, + years, + quantity, + purchaseType, + msg.Timestamp, + msg.From, + fundFrom + ) + if not tokenCostAndFundingPlan then + return + end + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.CostDetails .. "-Notice" }, + Data = json.encode(tokenCostAndFundingPlan), + }) + end) + + addEventingHandler( + ActionMap.RegistrationFees, + utils.hasMatchingTag("Action", ActionMap.RegistrationFees), + function(msg) + local priceList = arns.getRegistrationFees() + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.RegistrationFees .. 
"-Notice" }, + Data = json.encode(priceList), + }) + end + ) + + addEventingHandler(ActionMap.JoinNetwork, utils.hasMatchingTag("Action", ActionMap.JoinNetwork), function(msg) + local updatedSettings = { + label = msg.Tags.Label, + note = msg.Tags.Note, + fqdn = msg.Tags.FQDN, + port = msg.Tags.Port or 443, + protocol = msg.Tags.Protocol or "https", + allowDelegatedStaking = msg.Tags["Allow-Delegated-Staking"] == "true" + or msg.Tags["Allow-Delegated-Staking"] == "allowlist", + allowedDelegates = msg.Tags["Allow-Delegated-Staking"] == "allowlist" + and utils.splitAndTrimString(msg.Tags["Allowed-Delegates"] or "", ",") + or nil, + minDelegatedStake = msg.Tags["Min-Delegated-Stake"], + delegateRewardShareRatio = msg.Tags["Delegate-Reward-Share-Ratio"] or 0, + properties = msg.Tags.Properties or "FH1aVetOoulPGqgYukj0VE0wIhDy90WiQoV3U2PeY44", + autoStake = msg.Tags["Auto-Stake"] == "true", + } + + local updatedServices = utils.safeDecodeJson(msg.Tags.Services) + local fromAddress = msg.From + local observerAddress = msg.Tags["Observer-Address"] or fromAddress + local stake = msg.Tags["Operator-Stake"] + + assert(not msg.Tags.Services or updatedServices, "Services must be a valid JSON string") + + msg.ioEvent:addField("Resolved-Observer-Address", observerAddress) + msg.ioEvent:addField("Sender-Previous-Balance", Balances[fromAddress] or 0) + + local gateway = + gar.joinNetwork(fromAddress, stake, updatedSettings, updatedServices, observerAddress, msg.Timestamp) + msg.ioEvent:addField("Sender-New-Balance", Balances[fromAddress] or 0) + if gateway ~= nil then + msg.ioEvent:addField("GW-Start-Timestamp", gateway.startTimestamp) + end + local gwStats = gatewayStats() + msg.ioEvent:addField("Joined-Gateways-Count", gwStats.joined) + msg.ioEvent:addField("Leaving-Gateways-Count", gwStats.leaving) + + LastKnownCirculatingSupply = LastKnownCirculatingSupply - stake + LastKnownStakedSupply = LastKnownStakedSupply + stake + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = 
msg.From, + Tags = { Action = ActionMap.JoinNetwork .. "-Notice" }, + Data = json.encode(gateway), + }) + end) + + addEventingHandler(ActionMap.LeaveNetwork, utils.hasMatchingTag("Action", ActionMap.LeaveNetwork), function(msg) + local unsafeGatewayBeforeLeaving = gar.getGatewayUnsafe(msg.From) + local gwPrevTotalDelegatedStake = 0 + local gwPrevStake = 0 + if unsafeGatewayBeforeLeaving ~= nil then + gwPrevTotalDelegatedStake = unsafeGatewayBeforeLeaving.totalDelegatedStake + gwPrevStake = unsafeGatewayBeforeLeaving.operatorStake + end + + assert(unsafeGatewayBeforeLeaving, "Gateway not found") + + local gateway = gar.leaveNetwork(msg.From, msg.Timestamp, msg.Id) + + if gateway ~= nil then + msg.ioEvent:addField("GW-Vaults-Count", utils.lengthOfTable(gateway.vaults or {})) + local exitVault = gateway.vaults[msg.From] + local withdrawVault = gateway.vaults[msg.Id] + local previousStake = exitVault.balance + if exitVault ~= nil then + msg.ioEvent:addFieldsWithPrefixIfExist( + exitVault, + "Exit-Vault-", + { "balance", "startTimestamp", "endTimestamp" } + ) + end + if withdrawVault ~= nil then + previousStake = previousStake + withdrawVault.balance + msg.ioEvent:addFieldsWithPrefixIfExist( + withdrawVault, + "Withdraw-Vault-", + { "balance", "startTimestamp", "endTimestamp" } + ) + end + msg.ioEvent:addField("Previous-Operator-Stake", previousStake) + msg.ioEvent:addFieldsWithPrefixIfExist( + gateway, + "GW-", + { "totalDelegatedStake", "observerAddress", "startTimestamp", "endTimestamp" } + ) + msg.ioEvent:addFields(gateway.stats or {}) + end + + local gwStats = gatewayStats() + msg.ioEvent:addField("Joined-Gateways-Count", gwStats.joined) + msg.ioEvent:addField("Leaving-Gateways-Count", gwStats.leaving) + + LastKnownStakedSupply = LastKnownStakedSupply - gwPrevStake - gwPrevTotalDelegatedStake + LastKnownWithdrawSupply = LastKnownWithdrawSupply + gwPrevStake + gwPrevTotalDelegatedStake + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { 
Action = ActionMap.LeaveNetwork .. "-Notice" }, + Data = json.encode(gateway), + }) + end) + + addEventingHandler( + ActionMap.IncreaseOperatorStake, + utils.hasMatchingTag("Action", ActionMap.IncreaseOperatorStake), + function(msg) + local quantity = msg.Tags.Quantity + assert( + quantity and utils.isInteger(quantity) and quantity > 0, + "Invalid quantity. Must be integer greater than 0" + ) + + msg.ioEvent:addField("Sender-Previous-Balance", Balances[msg.From]) + local gateway = gar.increaseOperatorStake(msg.From, quantity) + + msg.ioEvent:addField("Sender-New-Balance", Balances[msg.From]) + if gateway ~= nil then + msg.ioEvent:addField("New-Operator-Stake", gateway.operatorStake) + msg.ioEvent:addField("Previous-Operator-Stake", gateway.operatorStake - quantity) + end + + LastKnownCirculatingSupply = LastKnownCirculatingSupply - quantity + LastKnownStakedSupply = LastKnownStakedSupply + quantity + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.IncreaseOperatorStake .. "-Notice" }, + Data = json.encode(gateway), + }) + end + ) + + addEventingHandler( + ActionMap.DecreaseOperatorStake, + utils.hasMatchingTag("Action", ActionMap.DecreaseOperatorStake), + function(msg) + local quantity = msg.Tags.Quantity + local instantWithdraw = msg.Tags.Instant and msg.Tags.Instant == "true" or false + assert( + quantity and utils.isInteger(quantity) and quantity > constants.MIN_WITHDRAWAL_AMOUNT, + "Invalid quantity. Must be integer greater than " .. 
constants.MIN_WITHDRAWAL_AMOUNT + ) + assert( + msg.Tags.Instant == nil or (msg.Tags.Instant == "true" or msg.Tags.Instant == "false"), + "Instant must be a string with value 'true' or 'false'" + ) + + msg.ioEvent:addField("Sender-Previous-Balance", Balances[msg.From]) + + local result = gar.decreaseOperatorStake(msg.From, quantity, msg.Timestamp, msg.Id, instantWithdraw) + local decreaseOperatorStakeResult = { + gateway = result and result.gateway or {}, + penaltyRate = result and result.penaltyRate or 0, + expeditedWithdrawalFee = result and result.expeditedWithdrawalFee or 0, + amountWithdrawn = result and result.amountWithdrawn or 0, + } + + msg.ioEvent:addField("Sender-New-Balance", Balances[msg.From]) -- should be unchanged + if result ~= nil and result.gateway ~= nil then + local gateway = result.gateway + local previousStake = gateway.operatorStake + quantity + msg.ioEvent:addField("New-Operator-Stake", gateway.operatorStake) + msg.ioEvent:addField("GW-Vaults-Count", utils.lengthOfTable(gateway.vaults or {})) + if instantWithdraw then + msg.ioEvent:addField("Instant-Withdrawal", instantWithdraw) + msg.ioEvent:addField("Instant-Withdrawal-Fee", result.expeditedWithdrawalFee) + msg.ioEvent:addField("Amount-Withdrawn", result.amountWithdrawn) + msg.ioEvent:addField("Penalty-Rate", result.penaltyRate) + end + local decreaseStakeVault = gateway.vaults[msg.Id] + if decreaseStakeVault ~= nil then + previousStake = previousStake + decreaseStakeVault.balance + msg.ioEvent:addFieldsWithPrefixIfExist( + decreaseStakeVault, + "Decrease-Stake-Vault-", + { "balance", "startTimestamp", "endTimestamp" } + ) + end + msg.ioEvent:addField("Previous-Operator-Stake", previousStake) + end + + LastKnownStakedSupply = LastKnownStakedSupply - quantity + if instantWithdraw then + LastKnownCirculatingSupply = LastKnownCirculatingSupply + decreaseOperatorStakeResult.amountWithdrawn + else + LastKnownWithdrawSupply = LastKnownWithdrawSupply + quantity + end + + 
addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { + Action = ActionMap.DecreaseOperatorStake .. "-Notice", + ["Penalty-Rate"] = tostring(decreaseOperatorStakeResult.penaltyRate), + ["Expedited-Withdrawal-Fee"] = tostring(decreaseOperatorStakeResult.expeditedWithdrawalFee), + ["Amount-Withdrawn"] = tostring(decreaseOperatorStakeResult.amountWithdrawn), + }, + Data = json.encode(decreaseOperatorStakeResult.gateway), + }) + end + ) + + addEventingHandler(ActionMap.DelegateStake, utils.hasMatchingTag("Action", ActionMap.DelegateStake), function(msg) + local gatewayTarget = msg.Tags.Target or msg.Tags.Address + local quantity = msg.Tags.Quantity + assert(utils.isValidAddress(gatewayTarget, true), "Invalid gateway address") + assert( + msg.Tags.Quantity and msg.Tags.Quantity > 0 and utils.isInteger(msg.Tags.Quantity), + "Invalid quantity. Must be integer greater than 0" + ) + + msg.ioEvent:addField("Target-Formatted", gatewayTarget) + + local gateway = gar.delegateStake(msg.From, gatewayTarget, quantity, msg.Timestamp) + local delegateResult = {} + if gateway ~= nil then + local newStake = gateway.delegates[msg.From].delegatedStake + msg.ioEvent:addField("Previous-Stake", newStake - quantity) + msg.ioEvent:addField("New-Stake", newStake) + msg.ioEvent:addField("Gateway-Total-Delegated-Stake", gateway.totalDelegatedStake) + delegateResult = gateway.delegates[msg.From] + end + + LastKnownCirculatingSupply = LastKnownCirculatingSupply - quantity + LastKnownDelegatedSupply = LastKnownDelegatedSupply + quantity + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.DelegateStake .. 
"-Notice", Gateway = gatewayTarget }, + Data = json.encode(delegateResult), + }) + end) + + addEventingHandler( + ActionMap.CancelWithdrawal, + utils.hasMatchingTag("Action", ActionMap.CancelWithdrawal), + function(msg) + local gatewayAddress = msg.Tags.Target or msg.Tags.Address or msg.From + local vaultId = msg.Tags["Vault-Id"] + assert(utils.isValidAddress(gatewayAddress, true), "Invalid gateway address") + assert(utils.isValidAddress(vaultId, true), "Invalid vault id") + + msg.ioEvent:addField("Target-Formatted", gatewayAddress) + + local result = gar.cancelGatewayWithdrawal(msg.From, gatewayAddress, vaultId) + local updatedGateway = {} + if result ~= nil then + updatedGateway = result.gateway + local vaultBalance = result.vaultBalance + local previousOperatorStake = result.previousOperatorStake + local newOperatorStake = result.totalOperatorStake + local previousTotalDelegatedStake = result.previousTotalDelegatedStake + local newTotalDelegatedStake = result.totalDelegatedStake + local operatorStakeChange = newOperatorStake - previousOperatorStake + local delegatedStakeChange = newTotalDelegatedStake - previousTotalDelegatedStake + msg.ioEvent:addField("Previous-Operator-Stake", previousOperatorStake) + msg.ioEvent:addField("New-Operator-Stake", newOperatorStake) + msg.ioEvent:addField("Previous-Total-Delegated-Stake", previousTotalDelegatedStake) + msg.ioEvent:addField("New-Total-Delegated-Stake", newTotalDelegatedStake) + msg.ioEvent:addField("Stake-Amount-Withdrawn", vaultBalance) + LastKnownStakedSupply = LastKnownStakedSupply + operatorStakeChange + LastKnownDelegatedSupply = LastKnownDelegatedSupply + delegatedStakeChange + LastKnownWithdrawSupply = LastKnownWithdrawSupply - vaultBalance + addSupplyData(msg.ioEvent) + end + + Send(msg, { + Target = msg.From, + Tags = { + Action = ActionMap.CancelWithdrawal .. 
"-Notice", + Address = gatewayAddress, + ["Vault-Id"] = msg.Tags["Vault-Id"], + }, + Data = json.encode(updatedGateway), + }) + end + ) + + addEventingHandler( + ActionMap.InstantWithdrawal, + utils.hasMatchingTag("Action", ActionMap.InstantWithdrawal), + function(msg) + local target = msg.Tags.Target or msg.Tags.Address or msg.From -- if not provided, use sender + local vaultId = msg.Tags["Vault-Id"] + msg.ioEvent:addField("Target-Formatted", target) + assert(utils.isValidAddress(target, true), "Invalid gateway address") + assert(utils.isValidAddress(vaultId, true), "Invalid vault id") + + local result = gar.instantGatewayWithdrawal(msg.From, target, vaultId, msg.Timestamp) + if result ~= nil then + local vaultBalance = result.vaultBalance + msg.ioEvent:addField("Stake-Amount-Withdrawn", vaultBalance) + msg.ioEvent:addField("Vault-Elapsed-Time", result.elapsedTime) + msg.ioEvent:addField("Vault-Remaining-Time", result.remainingTime) + msg.ioEvent:addField("Penalty-Rate", result.penaltyRate) + msg.ioEvent:addField("Instant-Withdrawal-Fee", result.expeditedWithdrawalFee) + msg.ioEvent:addField("Amount-Withdrawn", result.amountWithdrawn) + msg.ioEvent:addField("Previous-Vault-Balance", result.amountWithdrawn + result.expeditedWithdrawalFee) + LastKnownCirculatingSupply = LastKnownCirculatingSupply + result.amountWithdrawn + LastKnownWithdrawSupply = LastKnownWithdrawSupply + - result.amountWithdrawn + - result.expeditedWithdrawalFee + addSupplyData(msg.ioEvent) + Send(msg, { + Target = msg.From, + Tags = { + Action = ActionMap.InstantWithdrawal .. 
"-Notice", + Address = target, + ["Vault-Id"] = vaultId, + ["Amount-Withdrawn"] = tostring(result.amountWithdrawn), + ["Penalty-Rate"] = tostring(result.penaltyRate), + ["Expedited-Withdrawal-Fee"] = tostring(result.expeditedWithdrawalFee), + }, + Data = json.encode(result), + }) + end + end + ) + + addEventingHandler( + ActionMap.DecreaseDelegateStake, + utils.hasMatchingTag("Action", ActionMap.DecreaseDelegateStake), + function(msg) + local target = msg.Tags.Target or msg.Tags.Address + local quantity = msg.Tags.Quantity + local instantWithdraw = msg.Tags.Instant and msg.Tags.Instant == "true" or false + msg.ioEvent:addField("Target-Formatted", target) + msg.ioEvent:addField("Quantity", quantity) + assert( + quantity and utils.isInteger(quantity) and quantity > constants.MIN_WITHDRAWAL_AMOUNT, + "Invalid quantity. Must be integer greater than " .. constants.MIN_WITHDRAWAL_AMOUNT + ) + + local result = gar.decreaseDelegateStake(target, msg.From, quantity, msg.Timestamp, msg.Id, instantWithdraw) + local decreaseDelegateStakeResult = { + penaltyRate = result and result.penaltyRate or 0, + expeditedWithdrawalFee = result and result.expeditedWithdrawalFee or 0, + amountWithdrawn = result and result.amountWithdrawn or 0, + } + + msg.ioEvent:addField("Sender-New-Balance", Balances[msg.From]) -- should be unchanged + + if result ~= nil then + local newStake = result.updatedDelegate.delegatedStake + msg.ioEvent:addField("Previous-Stake", newStake + quantity) + msg.ioEvent:addField("New-Stake", newStake) + msg.ioEvent:addField("Gateway-Total-Delegated-Stake", result.gatewayTotalDelegatedStake) + + if instantWithdraw then + msg.ioEvent:addField("Instant-Withdrawal", instantWithdraw) + msg.ioEvent:addField("Instant-Withdrawal-Fee", result.expeditedWithdrawalFee) + msg.ioEvent:addField("Amount-Withdrawn", result.amountWithdrawn) + msg.ioEvent:addField("Penalty-Rate", result.penaltyRate) + end + + local newDelegateVaults = result.updatedDelegate.vaults + if newDelegateVaults 
~= nil then + msg.ioEvent:addField("Vaults-Count", utils.lengthOfTable(newDelegateVaults)) + local newDelegateVault = newDelegateVaults[msg.Id] + if newDelegateVault ~= nil then + msg.ioEvent:addField("Vault-Id", msg.Id) + msg.ioEvent:addField("Vault-Balance", newDelegateVault.balance) + msg.ioEvent:addField("Vault-Start-Timestamp", newDelegateVault.startTimestamp) + msg.ioEvent:addField("Vault-End-Timestamp", newDelegateVault.endTimestamp) + end + end + end + + LastKnownDelegatedSupply = LastKnownDelegatedSupply - quantity + if not instantWithdraw then + LastKnownWithdrawSupply = LastKnownWithdrawSupply + quantity + end + LastKnownCirculatingSupply = LastKnownCirculatingSupply + decreaseDelegateStakeResult.amountWithdrawn + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { + Action = ActionMap.DecreaseDelegateStake .. "-Notice", + Address = target, + Quantity = quantity, + ["Penalty-Rate"] = tostring(decreaseDelegateStakeResult.penaltyRate), + ["Expedited-Withdrawal-Fee"] = tostring(decreaseDelegateStakeResult.expeditedWithdrawalFee), + ["Amount-Withdrawn"] = tostring(decreaseDelegateStakeResult.amountWithdrawn), + }, + Data = json.encode(result and result.updatedDelegate or {}), + }) + end + ) + + addEventingHandler( + ActionMap.UpdateGatewaySettings, + utils.hasMatchingTag("Action", ActionMap.UpdateGatewaySettings), + function(msg) + local unsafeGateway = gar.getGatewayUnsafe(msg.From) + local updatedServices = utils.safeDecodeJson(msg.Tags.Services) + + assert(unsafeGateway, "Gateway not found") + assert(not msg.Tags.Services or updatedServices, "Services must be provided if Services-Json is provided") + -- keep defaults, but update any new ones + + -- If delegated staking is being fully enabled or disabled, clear the allowlist + local allowDelegatedStakingOverride = msg.Tags["Allow-Delegated-Staking"] + local enableOpenDelegatedStaking = allowDelegatedStakingOverride == "true" + local enableLimitedDelegatedStaking = 
allowDelegatedStakingOverride == "allowlist" + local disableDelegatedStaking = allowDelegatedStakingOverride == "false" + local shouldClearAllowlist = enableOpenDelegatedStaking or disableDelegatedStaking + local needNewAllowlist = not shouldClearAllowlist + and ( + enableLimitedDelegatedStaking + or (unsafeGateway.settings.allowedDelegatesLookup and msg.Tags["Allowed-Delegates"] ~= nil) + ) + + local updatedSettings = { + label = msg.Tags.Label or unsafeGateway.settings.label, + note = msg.Tags.Note or unsafeGateway.settings.note, + fqdn = msg.Tags.FQDN or unsafeGateway.settings.fqdn, + port = msg.Tags.Port or unsafeGateway.settings.port, + protocol = msg.Tags.Protocol or unsafeGateway.settings.protocol, + allowDelegatedStaking = enableOpenDelegatedStaking -- clear directive to enable + or enableLimitedDelegatedStaking -- clear directive to enable + or not disableDelegatedStaking -- NOT clear directive to DISABLE + and unsafeGateway.settings.allowDelegatedStaking, -- otherwise unspecified, so use previous setting + + allowedDelegates = needNewAllowlist and utils.splitAndTrimString(msg.Tags["Allowed-Delegates"], ",") -- replace the lookup list + or nil, -- change nothing + + minDelegatedStake = msg.Tags["Min-Delegated-Stake"] or unsafeGateway.settings.minDelegatedStake, + delegateRewardShareRatio = msg.Tags["Delegate-Reward-Share-Ratio"] + or unsafeGateway.settings.delegateRewardShareRatio, + properties = msg.Tags.Properties or unsafeGateway.settings.properties, + autoStake = not msg.Tags["Auto-Stake"] and unsafeGateway.settings.autoStake + or msg.Tags["Auto-Stake"] == "true", + } + + local observerAddress = msg.Tags["Observer-Address"] or unsafeGateway.observerAddress + local result = gar.updateGatewaySettings( + msg.From, + updatedSettings, + updatedServices, + observerAddress, + msg.Timestamp, + msg.Id + ) + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.UpdateGatewaySettings .. 
"-Notice" }, + Data = json.encode(result), + }) + end + ) + + addEventingHandler(ActionMap.ReassignName, utils.hasMatchingTag("Action", ActionMap.ReassignName), function(msg) + local newProcessId = msg.Tags["Process-Id"] + local name = string.lower(msg.Tags.Name) + local initiator = msg.Tags.Initiator + local allowUnsafeProcessId = msg.Tags["Allow-Unsafe-Addresses"] + assert(name and #name > 0, "Name is required") + assert(utils.isValidAddress(newProcessId, true), "Process Id must be a valid address.") + if initiator ~= nil then + assert(utils.isValidAddress(initiator, true), "Invalid initiator address.") + end + + local reassignment = arns.reassignName(name, msg.From, msg.Timestamp, newProcessId, allowUnsafeProcessId) + + Send(msg, { + Target = msg.From, + Action = ActionMap.ReassignName .. "-Notice", + Name = name, + Data = json.encode(reassignment), + }) + + if initiator ~= nil then + Send(msg, { + Target = initiator, + Action = ActionMap.ReassignName .. "-Notice", + Name = name, + Data = json.encode(reassignment), + }) + end + return + end) + + addEventingHandler( + ActionMap.SaveObservations, + utils.hasMatchingTag("Action", ActionMap.SaveObservations), + function(msg) + local reportTxId = msg.Tags["Report-Tx-Id"] + local failedGateways = utils.splitAndTrimString(msg.Tags["Failed-Gateways"], ",") + -- observers provide AR-IO-Epoch-Index, so check both + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or msg.Tags["AR-IO-Epoch-Index"] and tonumber(msg.Tags["AR-IO-Epoch-Index"]) + assert( + epochIndex and epochIndex >= 0 and utils.isInteger(epochIndex), + "Epoch index is required. Must be a number greater than 0." + ) + assert(utils.isValidArweaveAddress(reportTxId), "Invalid report tx id. Must be a valid Arweave address.") + for _, gateway in ipairs(failedGateways) do + assert(utils.isValidAddress(gateway, true), "Invalid failed gateway address: " .. 
gateway) + end + + local observations = + epochs.saveObservations(msg.From, reportTxId, failedGateways, epochIndex, msg.Timestamp) + if observations ~= nil then + local failureSummariesCount = utils.lengthOfTable(observations.failureSummaries or {}) + if failureSummariesCount > 0 then + msg.ioEvent:addField("Failure-Summaries-Count", failureSummariesCount) + end + local reportsCount = utils.lengthOfTable(observations.reports or {}) + if reportsCount > 0 then + msg.ioEvent:addField("Reports-Count", reportsCount) + end + end + + Send(msg, { + Target = msg.From, + Action = ActionMap.SaveObservations .. "-Notice", + Data = json.encode(observations), + }) + end + ) + + addEventingHandler(ActionMap.EpochSettings, utils.hasMatchingTag("Action", ActionMap.EpochSettings), function(msg) + local epochSettings = epochs.getSettings() + + Send(msg, { + Target = msg.From, + Action = ActionMap.EpochSettings .. "-Notice", + Data = json.encode(epochSettings), + }) + end) + + addEventingHandler( + ActionMap.DemandFactorSettings, + utils.hasMatchingTag("Action", ActionMap.DemandFactorSettings), + function(msg) + local demandFactorSettings = demand.getSettings() + Send(msg, { + Target = msg.From, + Action = ActionMap.DemandFactorSettings .. "-Notice", + Data = json.encode(demandFactorSettings), + }) + end + ) + + addEventingHandler( + ActionMap.GatewayRegistrySettings, + utils.hasMatchingTag("Action", ActionMap.GatewayRegistrySettings), + function(msg) + local gatewayRegistrySettings = gar.getSettings() + Send(msg, { + Target = msg.From, + Action = ActionMap.GatewayRegistrySettings .. 
"-Notice", + Data = json.encode(gatewayRegistrySettings), + }) + end + ) + + -- Reference: https://github.com/permaweb/aos/blob/eea71b68a4f89ac14bf6797804f97d0d39612258/blueprints/token.lua#L264-L280 + addEventingHandler("totalSupply", utils.hasMatchingTag("Action", ActionMap.TotalSupply), function(msg) + assert(msg.From ~= ao.id, "Cannot call Total-Supply from the same process!") + local totalSupplyDetails = token.computeTotalSupply() + addSupplyData(msg.ioEvent, { + totalTokenSupply = totalSupplyDetails.totalSupply, + }) + addTalliesData(msg.ioEvent, totalSupplyDetails.stateObjectTallies) + msg.ioEvent:addField("Last-Known-Total-Token-Supply", token.lastKnownTotalTokenSupply()) + Send(msg, { + Action = "Total-Supply", + Data = tostring(totalSupplyDetails.totalSupply), + Ticker = Ticker, + }) + end) + + addEventingHandler("totalTokenSupply", utils.hasMatchingTag("Action", ActionMap.TotalTokenSupply), function(msg) + local totalSupplyDetails = token.computeTotalSupply() + addSupplyData(msg.ioEvent, { + totalTokenSupply = totalSupplyDetails.totalSupply, + }) + addTalliesData(msg.ioEvent, totalSupplyDetails.stateObjectTallies) + msg.ioEvent:addField("Last-Known-Total-Token-Supply", token.lastKnownTotalTokenSupply()) + + Send(msg, { + Target = msg.From, + Action = ActionMap.TotalTokenSupply .. 
"-Notice", + ["Total-Supply"] = tostring(totalSupplyDetails.totalSupply), + ["Circulating-Supply"] = tostring(totalSupplyDetails.circulatingSupply), + ["Locked-Supply"] = tostring(totalSupplyDetails.lockedSupply), + ["Staked-Supply"] = tostring(totalSupplyDetails.stakedSupply), + ["Delegated-Supply"] = tostring(totalSupplyDetails.delegatedSupply), + ["Withdraw-Supply"] = tostring(totalSupplyDetails.withdrawSupply), + ["Protocol-Balance"] = tostring(totalSupplyDetails.protocolBalance), + Data = json.encode({ + -- NOTE: json.lua supports up to stringified numbers with 20 significant digits - numbers should always be stringified + total = totalSupplyDetails.totalSupply, + circulating = totalSupplyDetails.circulatingSupply, + locked = totalSupplyDetails.lockedSupply, + staked = totalSupplyDetails.stakedSupply, + delegated = totalSupplyDetails.delegatedSupply, + withdrawn = totalSupplyDetails.withdrawSupply, + protocolBalance = totalSupplyDetails.protocolBalance, + }), + }) + end) + + -- distribute rewards + -- NOTE: THIS IS A CRITICAL HANDLER AND WILL DISCARD THE MEMORY ON ERROR + addEventingHandler("distribute", function(msg) + return msg.Action == "Tick" or msg.Action == "Distribute" + end, function(msg) + local msgId = msg.Id + local blockHeight = tonumber(msg["Block-Height"]) + local hashchain = msg["Hash-Chain"] + local lastCreatedEpochIndex = LastCreatedEpochIndex + local lastDistributedEpochIndex = LastDistributedEpochIndex + local targetCurrentEpochIndex = epochs.getEpochIndexForTimestamp(msg.Timestamp) + + assert(blockHeight, "Block height is required") + assert(hashchain, "Hash chain is required") + + msg.ioEvent:addField("Last-Created-Epoch-Index", lastCreatedEpochIndex) + msg.ioEvent:addField("Last-Distributed-Epoch-Index", lastDistributedEpochIndex) + msg.ioEvent:addField("Target-Current-Epoch-Index", targetCurrentEpochIndex) + + -- tick and distribute rewards for every index between the last ticked epoch and the current epoch + local 
distributedEpochIndexes = {} + local newEpochIndexes = {} + local newPruneGatewaysResults = {} + local tickedRewardDistributions = {} + local totalTickedRewardsDistributed = 0 + + -- tick the demand factor all the way to the current period + local latestDemandFactor, newDemandFactors = demand.updateDemandFactor(msg.Timestamp) + if latestDemandFactor ~= nil then + Send(msg, { + Target = msg.From, + Action = "Demand-Factor-Updated-Notice", + Data = tostring(latestDemandFactor), + }) + end + + --[[ + Tick up to the target epoch index, this will create new epochs and distribute rewards for existing epochs + This should never fall behind, but in the case it does, it will create the epochs and distribute rewards for the epochs + accordingly. It should finish at the target epoch index - which is computed based on the message timestamp + ]] + -- + print("Ticking from " .. lastCreatedEpochIndex .. " to " .. targetCurrentEpochIndex) + for epochIndexToTick = lastCreatedEpochIndex, targetCurrentEpochIndex do + local tickResult = tick.tickEpoch(msg.Timestamp, blockHeight, hashchain, msgId, epochIndexToTick) + if tickResult.pruneGatewaysResult ~= nil then + table.insert(newPruneGatewaysResults, tickResult.pruneGatewaysResult) + end + if tickResult.maybeNewEpoch ~= nil then + print("Created epoch " .. tickResult.maybeNewEpoch.epochIndex) + LastCreatedEpochIndex = tickResult.maybeNewEpoch.epochIndex + table.insert(newEpochIndexes, tickResult.maybeNewEpoch.epochIndex) + Send(msg, { + Target = msg.From, + Action = "Epoch-Created-Notice", + ["Epoch-Index"] = tostring(tickResult.maybeNewEpoch.epochIndex), + Data = json.encode(tickResult.maybeNewEpoch), + }) + end + if tickResult.maybeDistributedEpoch ~= nil then + print("Distributed rewards for epoch " .. 
tickResult.maybeDistributedEpoch.epochIndex) + LastDistributedEpochIndex = tickResult.maybeDistributedEpoch.epochIndex + tickedRewardDistributions[tostring(tickResult.maybeDistributedEpoch.epochIndex)] = + tickResult.maybeDistributedEpoch.distributions.totalDistributedRewards + totalTickedRewardsDistributed = totalTickedRewardsDistributed + + tickResult.maybeDistributedEpoch.distributions.totalDistributedRewards + table.insert(distributedEpochIndexes, tickResult.maybeDistributedEpoch.epochIndex) + Send(msg, { + Target = msg.From, + Action = "Epoch-Distribution-Notice", + ["Epoch-Index"] = tostring(tickResult.maybeDistributedEpoch.epochIndex), + Data = json.encode(tickResult.maybeDistributedEpoch), + }) + end + end + if #distributedEpochIndexes > 0 then + msg.ioEvent:addField("Distributed-Epoch-Indexes", distributedEpochIndexes) + end + if #newEpochIndexes > 0 then + msg.ioEvent:addField("New-Epoch-Indexes", newEpochIndexes) + -- Only print the prescribed observers of the newest epoch + local newestEpoch = epochs.getEpoch(math.max(table.unpack(newEpochIndexes))) + local prescribedObserverAddresses = {} + local prescribedObserverGatewayAddresses = {} + if newestEpoch ~= nil and newestEpoch.prescribedObservers ~= nil then + for observerAddress, gatewayAddress in pairs(newestEpoch.prescribedObservers) do + table.insert(prescribedObserverAddresses, observerAddress) + table.insert(prescribedObserverGatewayAddresses, gatewayAddress) + end + end + msg.ioEvent:addField("Prescribed-Observer-Addresses", prescribedObserverAddresses) + msg.ioEvent:addField("Prescribed-Observer-Gateway-Addresses", prescribedObserverGatewayAddresses) + end + local updatedDemandFactorCount = utils.lengthOfTable(newDemandFactors) + if updatedDemandFactorCount > 0 then + local updatedDemandFactorPeriods = {} + local updatedDemandFactorValues = {} + for _, df in ipairs(newDemandFactors) do + table.insert(updatedDemandFactorPeriods, df.period) + table.insert(updatedDemandFactorValues, df.demandFactor) 
+ end + msg.ioEvent:addField("New-Demand-Factor-Periods", updatedDemandFactorPeriods) + msg.ioEvent:addField("New-Demand-Factor-Values", updatedDemandFactorValues) + msg.ioEvent:addField("New-Demand-Factor-Count", updatedDemandFactorCount) + end + if #newPruneGatewaysResults > 0 then + -- Reduce the prune gateways results and then track changes + --- @type PruneGatewaysResult + local aggregatedPruneGatewaysResult = utils.reduce( + newPruneGatewaysResults, + --- @param acc PruneGatewaysResult + --- @param _ any + --- @param pruneGatewaysResult PruneGatewaysResult + function(acc, _, pruneGatewaysResult) + for _, address in pairs(pruneGatewaysResult.prunedGateways) do + table.insert(acc.prunedGateways, address) + end + for address, slashAmount in pairs(pruneGatewaysResult.slashedGateways) do + acc.slashedGateways[address] = (acc.slashedGateways[address] or 0) + slashAmount + end + acc.gatewayStakeReturned = acc.gatewayStakeReturned + pruneGatewaysResult.gatewayStakeReturned + acc.delegateStakeReturned = acc.delegateStakeReturned + pruneGatewaysResult.delegateStakeReturned + acc.gatewayStakeWithdrawing = acc.gatewayStakeWithdrawing + + pruneGatewaysResult.gatewayStakeWithdrawing + acc.delegateStakeWithdrawing = acc.delegateStakeWithdrawing + + pruneGatewaysResult.delegateStakeWithdrawing + acc.stakeSlashed = acc.stakeSlashed + pruneGatewaysResult.stakeSlashed + -- Upsert to the latest tallies if available + acc.gatewayObjectTallies = pruneGatewaysResult.gatewayObjectTallies or acc.gatewayObjectTallies + return acc + end, + { + prunedGateways = {}, + slashedGateways = {}, + gatewayStakeReturned = 0, + delegateStakeReturned = 0, + gatewayStakeWithdrawing = 0, + delegateStakeWithdrawing = 0, + stakeSlashed = 0, + } + ) + addPruneGatewaysResult(msg.ioEvent, aggregatedPruneGatewaysResult) + end + if utils.lengthOfTable(tickedRewardDistributions) > 0 then + msg.ioEvent:addField("Ticked-Reward-Distributions", tickedRewardDistributions) + 
msg.ioEvent:addField("Total-Ticked-Rewards-Distributed", totalTickedRewardsDistributed) + LastKnownCirculatingSupply = LastKnownCirculatingSupply + totalTickedRewardsDistributed + end + + local gwStats = gatewayStats() + msg.ioEvent:addField("Joined-Gateways-Count", gwStats.joined) + msg.ioEvent:addField("Leaving-Gateways-Count", gwStats.leaving) + addSupplyData(msg.ioEvent) + + -- Send a single tick notice to the sender after all epochs have been ticked + Send(msg, { + Target = msg.From, + Action = "Tick-Notice", + Data = json.encode({ + distributedEpochIndexes = distributedEpochIndexes, + newEpochIndexes = newEpochIndexes, + newDemandFactors = newDemandFactors, + newPruneGatewaysResults = newPruneGatewaysResults, + tickedRewardDistributions = tickedRewardDistributions, + totalTickedRewardsDistributed = totalTickedRewardsDistributed, + }), + }) + end, CRITICAL) + + -- READ HANDLERS + + addEventingHandler(ActionMap.Info, Handlers.utils.hasMatchingTag("Action", ActionMap.Info), function(msg) + local handlers = Handlers.list + local handlerNames = {} + + for _, handler in ipairs(handlers) do + table.insert(handlerNames, handler.name) + end + + local memoryKiBUsed = collectgarbage("count") + + Send(msg, { + Target = msg.From, + Action = "Info-Notice", + Tags = { + Name = Name, + Ticker = Ticker, + Logo = Logo, + Owner = Owner, + Denomination = tostring(Denomination), + LastCreatedEpochIndex = tostring(LastCreatedEpochIndex), + LastDistributedEpochIndex = tostring(LastDistributedEpochIndex), + Handlers = json.encode(handlerNames), + ["Memory-KiB-Used"] = tostring(memoryKiBUsed), + }, + Data = json.encode({ + Name = Name, + Ticker = Ticker, + Logo = Logo, + Owner = Owner, + Denomination = Denomination, + LastCreatedEpochIndex = LastCreatedEpochIndex, + LastDistributedEpochIndex = LastDistributedEpochIndex, + Handlers = handlerNames, + ["Memory-KiB-Used"] = memoryKiBUsed, + }), + }) + end) + + addEventingHandler(ActionMap.Gateway, Handlers.utils.hasMatchingTag("Action", 
ActionMap.Gateway), function(msg) + local gateway = gar.getCompactGateway(msg.Tags.Address or msg.From) + Send(msg, { + Target = msg.From, + Action = "Gateway-Notice", + Gateway = msg.Tags.Address or msg.From, + Data = json.encode(gateway), + }) + end) + + --- NOTE: this handler does not scale well, but various ecosystem apps rely on it (arconnect, ao.link, etc.) + addEventingHandler(ActionMap.Balances, Handlers.utils.hasMatchingTag("Action", ActionMap.Balances), function(msg) + Send(msg, { + Target = msg.From, + Action = "Balances-Notice", + Data = json.encode(Balances), + }) + end) + + addEventingHandler(ActionMap.Balance, Handlers.utils.hasMatchingTag("Action", ActionMap.Balance), function(msg) + local target = msg.Tags.Target or msg.Tags.Address or msg.Tags.Recipient or msg.From + local balance = balances.getBalance(target) + + -- must adhere to token.lua spec defined by https://github.com/permaweb/aos/blob/15dd81ee596518e2f44521e973b8ad1ce3ee9945/blueprints/token.lua + Send(msg, { + Target = msg.From, + Action = "Balance-Notice", + Account = target, + Data = tostring(balance), + Balance = tostring(balance), + Ticker = Ticker, + }) + end) + + addEventingHandler(ActionMap.DemandFactor, utils.hasMatchingTag("Action", ActionMap.DemandFactor), function(msg) + local demandFactor = demand.getDemandFactor() + Send(msg, { + Target = msg.From, + Action = "Demand-Factor-Notice", + Data = json.encode(demandFactor), + }) + end) + + addEventingHandler( + ActionMap.DemandFactorInfo, + utils.hasMatchingTag("Action", ActionMap.DemandFactorInfo), + function(msg) + local result = demand.getDemandFactorInfo() + Send(msg, { Target = msg.From, Action = "Demand-Factor-Info-Notice", Data = json.encode(result) }) + end + ) + + addEventingHandler(ActionMap.Record, utils.hasMatchingTag("Action", ActionMap.Record), function(msg) + local record = arns.getRecord(msg.Tags.Name) + + local recordNotice = { + Target = msg.From, + Action = "Record-Notice", + Name = msg.Tags.Name, 
+ Data = json.encode(record), + } + + -- Add forwarded tags to the credit and debit notice messages + for tagName, tagValue in pairs(msg) do + -- Tags beginning with "X-" are forwarded + if string.sub(tagName, 1, 2) == "X-" then + recordNotice[tagName] = tagValue + end + end + + -- Send Record-Notice + Send(msg, recordNotice) + end) + + addEventingHandler(ActionMap.Epoch, utils.hasMatchingTag("Action", ActionMap.Epoch), function(msg) + -- check if the epoch number is provided, if not get the epoch number from the timestamp + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) + local epoch = epochs.getEpoch(epochIndex) + if epoch then + -- populate the prescribed observers with weights for the epoch, this helps improve DX of downstream apps + epoch.prescribedObservers = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) + end + if epoch and epoch.distributions then + -- remove the distributions data from the epoch to avoid unbounded response payloads + epoch.distributions.rewards = nil + end + Send(msg, { Target = msg.From, Action = "Epoch-Notice", Data = json.encode(epoch) }) + end) + + addEventingHandler( + ActionMap.PrescribedObservers, + utils.hasMatchingTag("Action", ActionMap.PrescribedObservers), + function(msg) + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) + local prescribedObserversWithWeights = epochs.getPrescribedObserversWithWeightsForEpoch(epochIndex) + Send(msg, { + Target = msg.From, + Action = "Prescribed-Observers-Notice", + Data = json.encode(prescribedObserversWithWeights), + }) + end + ) + + addEventingHandler(ActionMap.Observations, utils.hasMatchingTag("Action", ActionMap.Observations), function(msg) + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) + local observations = 
epochs.getObservationsForEpoch(epochIndex) + Send(msg, { + Target = msg.From, + Action = "Observations-Notice", + EpochIndex = tostring(epochIndex), + Data = json.encode(observations), + }) + end) + + addEventingHandler( + ActionMap.PrescribedNames, + utils.hasMatchingTag("Action", ActionMap.PrescribedNames), + function(msg) + -- check if the epoch number is provided, if not get the epoch number from the timestamp + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) + local prescribedNames = epochs.getPrescribedNamesForEpoch(epochIndex) + Send(msg, { + Target = msg.From, + Action = "Prescribed-Names-Notice", + Data = json.encode(prescribedNames), + }) + end + ) + + addEventingHandler(ActionMap.Distributions, utils.hasMatchingTag("Action", ActionMap.Distributions), function(msg) + -- check if the epoch number is provided, if not get the epoch number from the timestamp + local epochIndex = msg.Tags["Epoch-Index"] and tonumber(msg.Tags["Epoch-Index"]) + or epochs.getEpochIndexForTimestamp(msg.Timestamp) + local distributions = epochs.getDistributionsForEpoch(epochIndex) + Send(msg, { + Target = msg.From, + Action = "Distributions-Notice", + Data = json.encode(distributions), + }) + end) + + addEventingHandler("epochRewards", utils.hasMatchingTag("Action", ActionMap.EpochRewards), function(msg) + local page = utils.parsePaginationTags(msg) + + local epochRewards = epochs.getEligibleRewardsForEpoch( + msg.Timestamp, + page.cursor, + page.limit, + page.sortBy or "cursorId", + page.sortOrder + ) + + Send(msg, { + Target = msg.From, + Action = "Epoch-Eligible-Rewards-Notice", + Data = json.encode(epochRewards), + }) + end) + + addEventingHandler("paginatedReservedNames", utils.hasMatchingTag("Action", ActionMap.ReservedNames), function(msg) + local page = utils.parsePaginationTags(msg) + local reservedNames = + arns.getPaginatedReservedNames(page.cursor, page.limit, page.sortBy or "name", 
page.sortOrder) + Send(msg, { Target = msg.From, Action = "Reserved-Names-Notice", Data = json.encode(reservedNames) }) + end) + + addEventingHandler(ActionMap.ReservedName, utils.hasMatchingTag("Action", ActionMap.ReservedName), function(msg) + local name = msg.Tags.Name and string.lower(msg.Tags.Name) + assert(name, "Name is required") + local reservedName = arns.getReservedName(name) + Send(msg, { + Target = msg.From, + Action = "Reserved-Name-Notice", + ReservedName = msg.Tags.Name, + Data = json.encode(reservedName), + }) + end) + + addEventingHandler(ActionMap.Vault, utils.hasMatchingTag("Action", ActionMap.Vault), function(msg) + local address = msg.Tags.Address or msg.From + local vaultId = msg.Tags["Vault-Id"] + local vault = vaults.getVault(address, vaultId) + assert(vault, "Vault not found") + Send(msg, { + Target = msg.From, + Action = "Vault-Notice", + Address = address, + ["Vault-Id"] = vaultId, + Data = json.encode(vault), + }) + end) + + -- Pagination handlers + + addEventingHandler("paginatedRecords", function(msg) + return msg.Action == "Paginated-Records" or msg.Action == ActionMap.Records + end, function(msg) + local page = utils.parsePaginationTags(msg) + local result = arns.getPaginatedRecords( + page.cursor, + page.limit, + page.sortBy or "startTimestamp", + page.sortOrder, + page.filters + ) + Send(msg, { Target = msg.From, Action = "Records-Notice", Data = json.encode(result) }) + end) + + addEventingHandler("paginatedGateways", function(msg) + return msg.Action == "Paginated-Gateways" or msg.Action == ActionMap.Gateways + end, function(msg) + local page = utils.parsePaginationTags(msg) + local result = + gar.getPaginatedGateways(page.cursor, page.limit, page.sortBy or "startTimestamp", page.sortOrder or "desc") + Send(msg, { Target = msg.From, Action = "Gateways-Notice", Data = json.encode(result) }) + end) + + addEventingHandler("paginatedBalances", utils.hasMatchingTag("Action", "Paginated-Balances"), function(msg) + local page = 
utils.parsePaginationTags(msg) + local walletBalances = + balances.getPaginatedBalances(page.cursor, page.limit, page.sortBy or "balance", page.sortOrder) + Send(msg, { Target = msg.From, Action = "Balances-Notice", Data = json.encode(walletBalances) }) + end) + + addEventingHandler("paginatedVaults", function(msg) + return msg.Action == "Paginated-Vaults" or msg.Action == ActionMap.Vaults + end, function(msg) + local page = utils.parsePaginationTags(msg) + local pageVaults = vaults.getPaginatedVaults(page.cursor, page.limit, page.sortOrder, page.sortBy) + Send(msg, { Target = msg.From, Action = "Vaults-Notice", Data = json.encode(pageVaults) }) + end) + + addEventingHandler("paginatedDelegates", function(msg) + return msg.Action == "Paginated-Delegates" or msg.Action == ActionMap.Delegates + end, function(msg) + local page = utils.parsePaginationTags(msg) + local result = gar.getPaginatedDelegates( + msg.Tags.Address or msg.From, + page.cursor, + page.limit, + page.sortBy or "startTimestamp", + page.sortOrder + ) + Send(msg, { Target = msg.From, Action = "Delegates-Notice", Data = json.encode(result) }) + end) + + addEventingHandler( + "paginatedAllowedDelegates", + utils.hasMatchingTag("Action", "Paginated-Allowed-Delegates"), + function(msg) + local page = utils.parsePaginationTags(msg) + local result = + gar.getPaginatedAllowedDelegates(msg.Tags.Address or msg.From, page.cursor, page.limit, page.sortOrder) + Send(msg, { Target = msg.From, Action = "Allowed-Delegates-Notice", Data = json.encode(result) }) + end + ) + + -- END READ HANDLERS + + addEventingHandler("releaseName", utils.hasMatchingTag("Action", ActionMap.ReleaseName), function(msg) + -- validate the name and process id exist, then create the returned name + local name = msg.Tags.Name and string.lower(msg.Tags.Name) + local processId = msg.From + local initiator = msg.Tags.Initiator or msg.From + + assert(name and #name > 0, "Name is required") -- this could be an undername, so we don't want to 
assertValidArNSName + assert(processId and utils.isValidAddress(processId, true), "Process-Id must be a valid address") + assert(initiator and utils.isValidAddress(initiator, true), "Initiator is required") + local record = arns.getRecord(name) + assert(record, "Record not found") + assert(record.type == "permabuy", "Only permabuy names can be released") + assert(record.processId == processId, "Process-Id mismatch") + assert( + #primaryNames.getPrimaryNamesForBaseName(name) == 0, + "Primary names are associated with this name. They must be removed before releasing the name." + ) + + -- we should be able to create the returned name here + local removedRecord = arns.removeRecord(name) + local removedPrimaryNamesAndOwners = primaryNames.removePrimaryNamesForBaseName(name) -- NOTE: this should be empty if there are no primary names allowed before release + local returnedName = arns.createReturnedName(name, msg.Timestamp, initiator) + local returnedNameData = { + removedRecord = removedRecord, + removedPrimaryNamesAndOwners = removedPrimaryNamesAndOwners, + returnedName = returnedName, + } + + addReturnedNameResultFields(msg.ioEvent, { + name = name, + returnedName = returnedNameData.returnedName, + removedRecord = returnedNameData.removedRecord, + removedPrimaryNamesAndOwners = returnedNameData.removedPrimaryNamesAndOwners, + }) + + -- note: no change to token supply here - only on buy record of returned name + msg.ioEvent:addField("Returned-Name-Count", utils.lengthOfTable(NameRegistry.returned)) + msg.ioEvent:addField("Records-Count", utils.lengthOfTable(NameRegistry.records)) + + local releaseNameData = { + name = name, + startTimestamp = returnedName.startTimestamp, + endTimestamp = returnedName.startTimestamp + constants.RETURNED_NAME_DURATION_MS, + initiator = returnedName.initiator, + } + + -- send to the initiator and the process that released the name + Send(msg, { + Target = initiator, + Action = "Returned-Name-Notice", + Name = name, + Data = 
json.encode(releaseNameData), + }) + Send(msg, { + Target = processId, + Action = "Returned-Name-Notice", + Name = name, + Data = json.encode(releaseNameData), + }) + end) + + addEventingHandler(ActionMap.ReturnedNames, utils.hasMatchingTag("Action", ActionMap.ReturnedNames), function(msg) + local page = utils.parsePaginationTags(msg) + local returnedNames = arns.getReturnedNamesUnsafe() + + --- @type ReturnedNameData[] -- Returned Names with End Timestamp and Premium Multiplier + local returnedNameDataArray = {} + + for _, v in pairs(returnedNames) do + table.insert(returnedNameDataArray, { + name = v.name, + startTimestamp = v.startTimestamp, + endTimestamp = v.startTimestamp + constants.RETURNED_NAME_DURATION_MS, + initiator = v.initiator, + premiumMultiplier = arns.getReturnedNamePremiumMultiplier(v.startTimestamp, msg.Timestamp), + }) + end + + -- paginate the returnedNames by name, showing returnedNames nearest to the endTimestamp first + local paginatedReturnedNames = utils.paginateTableWithCursor( + returnedNameDataArray, + page.cursor, + "name", + page.limit, + page.sortBy or "endTimestamp", + page.sortOrder or "asc" + ) + Send(msg, { + Target = msg.From, + Action = ActionMap.ReturnedNames .. "-Notice", + Data = json.encode(paginatedReturnedNames), + }) + end) + + addEventingHandler(ActionMap.ReturnedName, utils.hasMatchingTag("Action", ActionMap.ReturnedName), function(msg) + local name = string.lower(msg.Tags.Name) + local returnedName = arns.getReturnedNameUnsafe(name) + + assert(returnedName, "Returned name not found") + + Send(msg, { + Target = msg.From, + Action = ActionMap.ReturnedName .. 
"-Notice", + Data = json.encode({ + name = returnedName.name, + startTimestamp = returnedName.startTimestamp, + endTimestamp = returnedName.startTimestamp + constants.RETURNED_NAME_DURATION_MS, + initiator = returnedName.initiator, + premiumMultiplier = arns.getReturnedNamePremiumMultiplier(returnedName.startTimestamp, msg.Timestamp), + }), + }) + end) + + addEventingHandler("allowDelegates", utils.hasMatchingTag("Action", ActionMap.AllowDelegates), function(msg) + local allowedDelegates = msg.Tags["Allowed-Delegates"] + and utils.splitAndTrimString(msg.Tags["Allowed-Delegates"], ",") + assert(allowedDelegates and #allowedDelegates > 0, "Allowed-Delegates is required") + msg.ioEvent:addField("Input-New-Delegates-Count", utils.lengthOfTable(allowedDelegates)) + local result = gar.allowDelegates(allowedDelegates, msg.From) + + if result ~= nil then + msg.ioEvent:addField("New-Allowed-Delegates", result.newAllowedDelegates or {}) + msg.ioEvent:addField("New-Allowed-Delegates-Count", utils.lengthOfTable(result.newAllowedDelegates)) + msg.ioEvent:addField( + "Gateway-Total-Allowed-Delegates", + utils.lengthOfTable(result.gateway and result.gateway.settings.allowedDelegatesLookup or {}) + + utils.lengthOfTable(result.gateway and result.gateway.delegates or {}) + ) + end + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.AllowDelegates .. 
"-Notice" }, + Data = json.encode(result and result.newAllowedDelegates or {}), + }) + end) + + addEventingHandler("disallowDelegates", utils.hasMatchingTag("Action", ActionMap.DisallowDelegates), function(msg) + local disallowedDelegates = msg.Tags["Disallowed-Delegates"] + and utils.splitAndTrimString(msg.Tags["Disallowed-Delegates"], ",") + assert(disallowedDelegates and #disallowedDelegates > 0, "Disallowed-Delegates is required") + msg.ioEvent:addField("Input-Disallowed-Delegates-Count", utils.lengthOfTable(disallowedDelegates)) + local result = gar.disallowDelegates(disallowedDelegates, msg.From, msg.Id, msg.Timestamp) + if result ~= nil then + msg.ioEvent:addField("New-Disallowed-Delegates", result.removedDelegates or {}) + msg.ioEvent:addField("New-Disallowed-Delegates-Count", utils.lengthOfTable(result.removedDelegates)) + msg.ioEvent:addField( + "Gateway-Total-Allowed-Delegates", + utils.lengthOfTable(result.gateway and result.gateway.settings.allowedDelegatesLookup or {}) + + utils.lengthOfTable(result.gateway and result.gateway.delegates or {}) + ) + end + + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.DisallowDelegates .. "-Notice" }, + Data = json.encode(result and result.removedDelegates or {}), + }) + end) + + addEventingHandler("paginatedDelegations", utils.hasMatchingTag("Action", "Paginated-Delegations"), function(msg) + local address = msg.Tags.Address or msg.From + local page = utils.parsePaginationTags(msg) + + assert(utils.isValidAddress(address, true), "Invalid address.") + + local result = gar.getPaginatedDelegations(address, page.cursor, page.limit, page.sortBy, page.sortOrder) + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.Delegations .. 
"-Notice" }, + Data = json.encode(result), + }) + end) + + -- Handler: moves the sender's stake between gateways (optionally sourcing it from a withdrawal vault), then updates last-known supply tallies and notifies the sender + addEventingHandler( + ActionMap.RedelegateStake, + utils.hasMatchingTag("Action", ActionMap.RedelegateStake), + function(msg) + local sourceAddress = msg.Tags.Source + local targetAddress = msg.Tags.Target + local delegateAddress = msg.From + local quantity = msg.Tags.Quantity or nil + -- NOTE(review): "or nil" is a no-op here; quantity is also used in numeric comparison and subtraction below without tonumber() — presumably the tag is coerced to a number upstream, TODO confirm (a raw string would raise on "quantity > 0") + local vaultId = msg.Tags["Vault-Id"] + + assert(utils.isValidAddress(sourceAddress, true), "Invalid source gateway address") + assert(utils.isValidAddress(targetAddress, true), "Invalid target gateway address") + assert(utils.isValidAddress(delegateAddress, true), "Invalid delegator address") + if vaultId then + assert(utils.isValidAddress(vaultId, true), "Invalid vault id") + end + + assert( + quantity and quantity > 0 and utils.isInteger(quantity), + "Invalid quantity. Must be integer greater than 0" + ) + local redelegationResult = gar.redelegateStake({ + sourceAddress = sourceAddress, + targetAddress = targetAddress, + delegateAddress = delegateAddress, + qty = quantity, + currentTimestamp = msg.Timestamp, + vaultId = vaultId, + }) + + local redelegationFee = redelegationResult.redelegationFee + local stakeMoved = quantity - redelegationFee + + local isStakeMovingFromDelegateToOperator = delegateAddress == targetAddress + local isStakeMovingFromOperatorToDelegate = delegateAddress == sourceAddress + local isStakeMovingFromWithdrawal = vaultId ~= nil + + --- Stake Direction Codings: + --- dw2o = Delegate Withdrawal to Operator Stake + --- d2o = Delegate Stake to Operator Stake + --- ow2d = Operator Withdrawal to Delegate Stake + --- o2d = Operator Stake to Delegate Stake + --- dw2d = Delegate Withdrawal to Other Delegate Stake + --- d2d = Delegate Stake to Other Delegate Stake + msg.ioEvent:addField( + "Stake-Direction", + isStakeMovingFromDelegateToOperator and (isStakeMovingFromWithdrawal and "dw2o" or "d2o") + or ( + isStakeMovingFromOperatorToDelegate and (isStakeMovingFromWithdrawal and "ow2d" or "o2d") + or 
(isStakeMovingFromWithdrawal and "dw2d" or "d2d") + ) + ) + + if isStakeMovingFromWithdrawal then + LastKnownWithdrawSupply = LastKnownWithdrawSupply - quantity + end + + if isStakeMovingFromDelegateToOperator then + if not isStakeMovingFromWithdrawal then + LastKnownDelegatedSupply = LastKnownDelegatedSupply - quantity + end + LastKnownStakedSupply = LastKnownStakedSupply + stakeMoved + elseif isStakeMovingFromOperatorToDelegate then + if not isStakeMovingFromWithdrawal then + LastKnownStakedSupply = LastKnownStakedSupply - quantity + end + LastKnownDelegatedSupply = LastKnownDelegatedSupply + stakeMoved + elseif isStakeMovingFromWithdrawal then + LastKnownStakedSupply = LastKnownStakedSupply + stakeMoved + else + LastKnownStakedSupply = LastKnownStakedSupply - redelegationFee + end + + if redelegationFee > 0 then + msg.ioEvent:addField("Redelegation-Fee", redelegationFee) + end + addSupplyData(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Tags = { + Action = ActionMap.RedelegateStake .. "-Notice", + }, + Data = json.encode(redelegationResult), + }) + end + ) + + addEventingHandler( + ActionMap.RedelegationFee, + utils.hasMatchingTag("Action", ActionMap.RedelegationFee), + function(msg) + local delegateAddress = msg.Tags.Address or msg.From + assert(utils.isValidAddress(delegateAddress, true), "Invalid delegator address") + local feeResult = gar.getRedelegationFee(delegateAddress) + Send(msg, { + Target = msg.From, + Tags = { Action = ActionMap.RedelegationFee .. 
"-Notice" }, + Data = json.encode(feeResult), + }) + end + ) + + --- PRIMARY NAMES + addEventingHandler("removePrimaryName", utils.hasMatchingTag("Action", ActionMap.RemovePrimaryNames), function(msg) + local names = utils.splitAndTrimString(msg.Tags.Names, ",") + assert(names and #names > 0, "Names are required") + assert(msg.From, "From is required") + local notifyOwners = msg.Tags["Notify-Owners"] and msg.Tags["Notify-Owners"] == "true" or false + + local removedPrimaryNamesAndOwners = primaryNames.removePrimaryNames(names, msg.From) + local removedPrimaryNamesCount = utils.lengthOfTable(removedPrimaryNamesAndOwners) + msg.ioEvent:addField("Num-Removed-Primary-Names", removedPrimaryNamesCount) + if removedPrimaryNamesCount > 0 then + msg.ioEvent:addField( + "Removed-Primary-Names", + utils.map(removedPrimaryNamesAndOwners, function(_, v) + return v.name + end) + ) + msg.ioEvent:addField( + "Removed-Primary-Name-Owners", + utils.map(removedPrimaryNamesAndOwners, function(_, v) + return v.owner + end) + ) + end + addPrimaryNameCounts(msg.ioEvent) + + Send(msg, { + Target = msg.From, + Action = ActionMap.RemovePrimaryNames .. "-Notice", + Data = json.encode(removedPrimaryNamesAndOwners), + }) + + -- Send messages to the owners of the removed primary names if the notifyOwners flag is true + if notifyOwners then + for _, removedPrimaryNameAndOwner in pairs(removedPrimaryNamesAndOwners) do + Send(msg, { + Target = removedPrimaryNameAndOwner.owner, + Action = ActionMap.RemovePrimaryNames .. 
"-Notice", + Tags = { Name = removedPrimaryNameAndOwner.name }, + Data = json.encode(removedPrimaryNameAndOwner), + }) + end + end + end) + + addEventingHandler("requestPrimaryName", utils.hasMatchingTag("Action", ActionMap.RequestPrimaryName), function(msg) + local fundFrom = msg.Tags["Fund-From"] + local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil + local initiator = msg.From + assert(name, "Name is required") + assert(initiator, "Initiator is required") + assertValidFundFrom(fundFrom) + + local primaryNameResult = + primaryNames.createPrimaryNameRequest(name, initiator, msg.Timestamp, msg.Id, fundFrom) + + addPrimaryNameRequestData(msg.ioEvent, primaryNameResult) + + --- if the from is the new owner, then send an approved notice to the from + if primaryNameResult.newPrimaryName then + Send(msg, { + Target = msg.From, + Action = ActionMap.ApprovePrimaryNameRequest .. "-Notice", + Data = json.encode(primaryNameResult), + }) + return + end + + if primaryNameResult.request then + --- send a notice to the msg.From, and the base name owner + Send(msg, { + Target = msg.From, + Action = ActionMap.PrimaryNameRequest .. "-Notice", + Data = json.encode(primaryNameResult), + }) + Send(msg, { + Target = primaryNameResult.baseNameOwner, + Action = ActionMap.PrimaryNameRequest .. 
"-Notice", + Data = json.encode(primaryNameResult), + }) + end + end) + + addEventingHandler( + "approvePrimaryNameRequest", + utils.hasMatchingTag("Action", ActionMap.ApprovePrimaryNameRequest), + function(msg) + local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil + local recipient = msg.Tags.Recipient or msg.From + + assert(name, "Name is required") + assert(recipient, "Recipient is required") + assert(msg.From, "From is required") + + local approvedPrimaryNameResult = + primaryNames.approvePrimaryNameRequest(recipient, name, msg.From, msg.Timestamp) + addPrimaryNameRequestData(msg.ioEvent, approvedPrimaryNameResult) + + --- send a notice to the from + Send(msg, { + Target = msg.From, + Action = ActionMap.ApprovePrimaryNameRequest .. "-Notice", + Data = json.encode(approvedPrimaryNameResult), + }) + --- send a notice to the owner + Send(msg, { + Target = approvedPrimaryNameResult.newPrimaryName.owner, + Action = ActionMap.ApprovePrimaryNameRequest .. "-Notice", + Data = json.encode(approvedPrimaryNameResult), + }) + end + ) + + --- Handles forward and reverse resolutions (e.g. name -> address and address -> name) + addEventingHandler("getPrimaryNameData", utils.hasMatchingTag("Action", ActionMap.PrimaryName), function(msg) + local name = msg.Tags.Name and string.lower(msg.Tags.Name) or nil + local address = msg.Tags.Address or msg.From + local primaryNameData = name and primaryNames.getPrimaryNameDataWithOwnerFromName(name) + or address and primaryNames.getPrimaryNameDataWithOwnerFromAddress(address) + assert(primaryNameData, "Primary name data not found") + return Send(msg, { + Target = msg.From, + Action = ActionMap.PrimaryName .. 
"-Notice", + Tags = { Owner = primaryNameData.owner, Name = primaryNameData.name }, + Data = json.encode(primaryNameData), + }) + end) + + addEventingHandler( + "getPrimaryNameRequest", + utils.hasMatchingTag("Action", ActionMap.PrimaryNameRequest), + function(msg) + local initiator = msg.Tags.Initiator or msg.From + local result = primaryNames.getPrimaryNameRequest(initiator) + assert(result, "Primary name request not found for " .. initiator) + return Send(msg, { + Target = msg.From, + Action = ActionMap.PrimaryNameRequests .. "-Notice", + Data = json.encode({ + name = result.name, + startTimestamp = result.startTimestamp, + endTimestamp = result.endTimestamp, + initiator = initiator, + }), + }) + end + ) + + addEventingHandler( + "getPaginatedPrimaryNameRequests", + utils.hasMatchingTag("Action", ActionMap.PrimaryNameRequests), + function(msg) + local page = utils.parsePaginationTags(msg) + local result = primaryNames.getPaginatedPrimaryNameRequests( + page.cursor, + page.limit, + page.sortBy or "startTimestamp", + page.sortOrder or "asc" + ) + return Send(msg, { + Target = msg.From, + Action = ActionMap.PrimaryNameRequests .. "-Notice", + Data = json.encode(result), + }) + end + ) + + addEventingHandler("getPaginatedPrimaryNames", utils.hasMatchingTag("Action", ActionMap.PrimaryNames), function(msg) + local page = utils.parsePaginationTags(msg) + local result = primaryNames.getPaginatedPrimaryNames( + page.cursor, + page.limit, + page.sortBy or "name", + page.sortOrder or "asc" + ) + + return Send(msg, { + Target = msg.From, + Action = ActionMap.PrimaryNames .. 
"-Notice", + Data = json.encode(result), + }) + end) + + addEventingHandler( + "getPaginatedGatewayVaults", + utils.hasMatchingTag("Action", "Paginated-Gateway-Vaults"), + function(msg) + local page = utils.parsePaginationTags(msg) + local gatewayAddress = utils.formatAddress(msg.Tags.Address or msg.From) + assert(utils.isValidAddress(gatewayAddress, true), "Invalid gateway address") + local result = gar.getPaginatedVaultsForGateway( + gatewayAddress, + page.cursor, + page.limit, + page.sortBy or "endTimestamp", + page.sortOrder or "desc" + ) + return Send(msg, { + Target = msg.From, + Action = "Gateway-Vaults-Notice", + Data = json.encode(result), + }) + end + ) + + addEventingHandler("getPruningTimestamps", utils.hasMatchingTag("Action", "Pruning-Timestamps"), function(msg) + addNextPruneTimestampsData(msg.ioEvent) + return Send(msg, { + Target = msg.From, + Action = "Pruning-Timestamps-Notice", + Data = json.encode({ + returnedNames = arns.nextReturnedNamesPruneTimestamp(), + gateways = gar.nextGatewaysPruneTimestamp(), + primaryNames = primaryNames.nextPrimaryNamesPruneTimestamp(), + records = arns.nextRecordsPruneTimestamp(), + redelegations = gar.nextRedelegationsPruneTimestamp(), + vaults = vaults.nextVaultsPruneTimestamp(), + }), + }) + end) + + addEventingHandler("allPaginatedDelegates", utils.hasMatchingTag("Action", "All-Paginated-Delegates"), function(msg) + local page = utils.parsePaginationTags(msg) + local result = gar.getPaginatedDelegatesFromAllGateways(page.cursor, page.limit, page.sortBy, page.sortOrder) + Send(msg, { Target = msg.From, Action = "All-Delegates-Notice", Data = json.encode(result) }) + end) + + addEventingHandler("allPaginatedGatewayVaults", utils.hasMatchingTag("Action", "All-Gateway-Vaults"), function(msg) + local page = utils.parsePaginationTags(msg) + local result = gar.getPaginatedVaultsFromAllGateways(page.cursor, page.limit, page.sortBy, page.sortOrder) + Send(msg, { Target = msg.From, Action = "All-Gateway-Vaults-Notice", 
Data = json.encode(result) }) + end) + + return main +end + +_G.package.loaded[".src.main"] = _loaded_mod_src_main() + +-- module: ".src.init" +local function _loaded_mod_src_init() + local src = {} + + function src.init() + require(".src.globals") + require(".src.main") + end + + return src +end + +_G.package.loaded[".src.init"] = _loaded_mod_src_init() + +-- module: ".state.init" +local function _loaded_mod_state_init() + local state = {} + + --[[ + To load any state, add raw lua files to this directory and require them here. + When a Lua file is required, it is executed from top to bottom. + Any global variables or functions defined will be available in the requiring scope. +]] + function state.init() + print("Initializing state...") + -- TODO: add reference state files + end + + return state +end + +_G.package.loaded[".state.init"] = _loaded_mod_state_init() + +local process = { name = "ARIO", version = "1.0.0" } + +-- load all the code related to the process + +require(".src.init").init() +require(".state.init") -- load any desired state files + +return process diff --git a/tests/gar.test.mjs b/tests/gar.test.mjs index dd315334..45095bad 100644 --- a/tests/gar.test.mjs +++ b/tests/gar.test.mjs @@ -964,7 +964,7 @@ describe('GatewayRegistry', async () => { }); if (instant) { - assert.equal(decreaseDelegateStakeResult.Messages.length, 1); + assert.equal(decreaseDelegateStakeResult.Messages.length, 2); decreaseDelegateStakeResult.Messages[0].Tags.sort((a, b) => a.name.localeCompare(b.name), ); @@ -980,7 +980,7 @@ describe('GatewayRegistry', async () => { delete decreaseDelegateStakeResult.Messages[0].Data; assert.deepStrictEqual(decreaseDelegateStakeResult.Messages[0], { Target: delegatorAddress, - Anchor: '00000000000000000000000000000008', + Anchor: '00000000000000000000000000000013', Tags: [ { name: 'Action', @@ -1020,7 +1020,7 @@ describe('GatewayRegistry', async () => { }, { name: 'Ref_', - value: '8', + value: '13', }, { name: 'Type', @@ -1754,7 +1754,7 @@ 
describe('GatewayRegistry', async () => { memory: gatewayMemory, }); - assert.equal(result.Messages.length, 1); + assert.equal(result.Messages.length, 2); assert.equal(result.Messages[0].Target, observerAddress); assert.deepEqual(JSON.parse(result.Messages[0].Data), { reports: { @@ -1776,7 +1776,7 @@ describe('GatewayRegistry', async () => { shouldAssertNoResultError: false, }); - assert.equal(result.Messages.length, 1); + assert.equal(result.Messages.length, 2); assert.equal(result.Messages[0].Target, invalidObserver); assert.ok( result.Messages[0].Data.includes( @@ -1795,7 +1795,7 @@ describe('GatewayRegistry', async () => { memory: gatewayMemory, shouldAssertNoResultError: false, }); - assert.equal(result.Messages.length, 1); + assert.equal(result.Messages.length, 2); assert.ok( result.Messages[0].Data.includes( 'Invalid report tx id. Must be a valid Arweave address.', @@ -1814,7 +1814,7 @@ describe('GatewayRegistry', async () => { shouldAssertNoResultError: false, }); - assert.equal(result.Messages?.length, 1); + assert.equal(result.Messages?.length, 2); assert.ok( result.Messages[0].Data.includes('Invalid failed gateway address:'), ); @@ -1831,7 +1831,7 @@ describe('GatewayRegistry', async () => { shouldAssertNoResultError: false, }); - assert.equal(result.Messages.length, 1); + assert.equal(result.Messages.length, 2); assert.ok( result.Messages[0].Data.includes( `Observations for epoch 0 must be submitted after ${epochSettings.epochZeroStartTimestamp}`, @@ -1851,7 +1851,7 @@ describe('GatewayRegistry', async () => { shouldAssertNoResultError: false, }); - assert.equal(result.Messages.length, 1); + assert.equal(result.Messages.length, 2); assert.ok( result.Messages[0].Data.includes( `Observations for epoch 0 must be submitted before ${epochSettings.epochZeroStartTimestamp + epochSettings.durationMs}`, diff --git a/tests/handlers.test.mjs b/tests/handlers.test.mjs index fe4429c1..78ead09a 100644 --- a/tests/handlers.test.mjs +++ b/tests/handlers.test.mjs @@ 
-24,7 +24,7 @@ describe('handlers', async () => { const defaultIndex = handlersList.indexOf('_default'); const sanitizeIndex = handlersList.indexOf('sanitize'); const pruneIndex = handlersList.indexOf('prune'); - const expectedHandlerCount = 75; // this should be updated if more handlers are added + const expectedHandlerCount = 76; // this should be updated if more handlers are added assert.ok(evalIndex === 0); assert.ok(defaultIndex === 1); assert.ok(sanitizeIndex === 2); diff --git a/tests/patch-balances.test.mjs b/tests/patch-balances.test.mjs new file mode 100644 index 00000000..e7054edc --- /dev/null +++ b/tests/patch-balances.test.mjs @@ -0,0 +1,190 @@ +/** + * Do a transfer from one address to a new one to ensure hb patches the new address + * Do a transfer that transfers all the tokens to a different address to ensure hb patches a 0 amount on the old address + * After that, do a computeTotalSupply call to ensure that hb patches the empty (nil) address afterwards as 0 + */ + +import { handle, transfer } from './helpers.mjs'; +import { describe, it } from 'node:test'; +import assert from 'node:assert'; +import { + PROCESS_OWNER, + STUB_ADDRESS, + STUB_TIMESTAMP, +} from '../tools/constants.mjs'; + +describe('hyperbeam patch balances', async () => { + it('should handle sending a patch to a newly created address', async () => { + const sender = STUB_ADDRESS; + const recipient = ''.padEnd(43, 'a'); + const quantity = 100000000; + const transferToSenderAddressMemory = await transfer({ + recipient: sender, + quantity, + }); + const transferToRecipientAddress = await handle({ + options: { + From: sender, + Owner: sender, + Tags: [ + { name: 'Action', value: 'Transfer' }, + { name: 'Recipient', value: recipient }, + { name: 'Quantity', value: String(quantity / 2) }, + ], + Timestamp: STUB_TIMESTAMP, + }, + memory: transferToSenderAddressMemory, + }); + const patchMessage = transferToRecipientAddress.Messages.at(-1); + const patchData = patchMessage.Tags.find( + 
(tag) => tag.name === 'balances', + ).value; + assert.equal(patchData[sender], quantity / 2); + assert.equal(patchData[recipient], quantity / 2); + }); + + it('should handle sending a patch that drains an address', async () => { + const sender = STUB_ADDRESS; + const recipient = ''.padEnd(43, 'a'); + const quantity = 100000000; + const transferToSenderAddressMemory = await transfer({ + recipient: sender, + quantity, + }); + + const transferToRecipientAddress = await handle({ + options: { + From: sender, + Owner: sender, + Tags: [ + { name: 'Action', value: 'Transfer' }, + { name: 'Recipient', value: recipient }, + { name: 'Quantity', value: String(quantity / 2) }, + ], + Timestamp: STUB_TIMESTAMP, + }, + memory: transferToSenderAddressMemory, + }); + + const patchMessage = transferToRecipientAddress.Messages.at(-1); + const patchData = patchMessage.Tags.find( + (tag) => tag.name === 'balances', + ).value; + assert.equal(patchData[sender], quantity / 2); + assert.equal(patchData[recipient], quantity / 2); + + const transferToDrainerAddress = await handle({ + options: { + From: sender, + Owner: sender, + Tags: [ + { name: 'Action', value: 'Transfer' }, + { name: 'Recipient', value: recipient }, + { name: 'Quantity', value: String(quantity / 2) }, + ], + Timestamp: STUB_TIMESTAMP, + }, + memory: transferToRecipientAddress.Memory, + }); + + const patchMessage2 = transferToDrainerAddress.Messages.at(-1); + const patchData2 = patchMessage2.Tags.find( + (tag) => tag.name === 'balances', + ).value; + assert.equal(patchData2[sender], 0); + assert.equal(patchData2[recipient], quantity); + }); + + it('should handle sending a patch when an address is removed from balances', async () => { + const sender = STUB_ADDRESS; + const recipient = ''.padEnd(43, 'a'); + const quantity = 100000000; + const transferToSenderAddressMemory = await transfer({ + recipient: sender, + quantity, + }); + const transferToRecipientAddress = await handle({ + options: { + From: sender, + Owner: sender, 
+ Tags: [ + { name: 'Action', value: 'Transfer' }, + { name: 'Recipient', value: recipient }, + { name: 'Quantity', value: String(quantity / 2) }, + ], + Timestamp: STUB_TIMESTAMP, + }, + memory: transferToSenderAddressMemory, + }); + const patchMessage = transferToRecipientAddress.Messages.at(-1); + const patchData = patchMessage.Tags.find( + (tag) => tag.name === 'balances', + ).value; + assert.equal(patchData[sender], quantity / 2); + assert.equal(patchData[recipient], quantity / 2); + + const transferToDrainerAddress = await handle({ + options: { + From: sender, + Owner: sender, + Tags: [ + { name: 'Action', value: 'Transfer' }, + { name: 'Recipient', value: recipient }, + { name: 'Quantity', value: String(quantity / 2) }, + ], + Timestamp: STUB_TIMESTAMP, + }, + memory: transferToRecipientAddress.Memory, + }); + + const patchMessage2 = transferToDrainerAddress.Messages.at(-1); + const patchData2 = patchMessage2.Tags.find( + (tag) => tag.name === 'balances', + ).value; + assert.equal(patchData2[sender], 0); + assert.equal(patchData2[recipient], quantity); + + const tokenSupplyRes = await handle({ + options: { + Tags: [{ name: 'Action', value: 'Total-Supply' }], + }, + memory: transferToDrainerAddress.Memory, + }); + + const patchMessage3 = tokenSupplyRes.Messages.at(-1); + const patchData3 = patchMessage3.Tags.find( + (tag) => tag.name === 'balances', + ).value; + assert.equal(patchData3[sender], 0); + }); + + it('should only send one patch message on Patch-Hyperbeam-Balances', async () => { + const result = await handle({ + options: { + From: PROCESS_OWNER, + Owner: PROCESS_OWNER, + Tags: [{ name: 'Action', value: 'Patch-Hyperbeam-Balances' }], + }, + }); + console.dir(result, { depth: null }); + assert.equal(result.Messages.length, 2); + }); + + it('should only allow the owner to trigger Patch-Hyperbeam-Balances', async () => { + const result = await handle({ + options: { + From: STUB_ADDRESS, + Owner: STUB_ADDRESS, + Tags: [{ name: 'Action', value: 
'Patch-Hyperbeam-Balances' }], + }, + shouldAssertNoResultError: false, + }); + const error = result.Messages.at(-1).Tags.find( + (tag) => tag.name === 'Error', + ).value; + assert( + error.includes('Only the owner can trigger Patch-Hyperbeam-Balances'), + 'Only the owner can trigger Patch-Hyperbeam-Balances', + ); + }); +}); diff --git a/tests/primary.test.mjs b/tests/primary.test.mjs index 676e82ae..428a0bc2 100644 --- a/tests/primary.test.mjs +++ b/tests/primary.test.mjs @@ -329,7 +329,7 @@ describe('primary names', function () { }); // there should be two messages, one to the ant and one to the owner - assert.equal(approvePrimaryNameRequestResult.Messages.length, 2); + assert.equal(approvePrimaryNameRequestResult.Messages.length, 3); assert.equal( approvePrimaryNameRequestResult.Messages[0].Target, processId, @@ -454,7 +454,7 @@ describe('primary names', function () { }); // there should be only one message with the Approve-Primary-Name-Request-Notice action - assert.equal(requestPrimaryNameResult.Messages.length, 1); + assert.equal(requestPrimaryNameResult.Messages.length, 2); assert.equal(requestPrimaryNameResult.Messages[0].Target, processId); // find the action tag in the messages @@ -565,7 +565,7 @@ describe('primary names', function () { // assert no error assertNoResultError(removePrimaryNameResult); // assert 2 messages sent - one to the owner and one to the recipient - assert.equal(removePrimaryNameResult.Messages.length, 2); + assert.equal(removePrimaryNameResult.Messages.length, 3); assert.equal(removePrimaryNameResult.Messages[0].Target, processId); assert.equal(removePrimaryNameResult.Messages[1].Target, recipient); const removedPrimaryNameData = JSON.parse( diff --git a/tests/tick.test.mjs b/tests/tick.test.mjs index 9a4f2237..2eb92eee 100644 --- a/tests/tick.test.mjs +++ b/tests/tick.test.mjs @@ -369,7 +369,7 @@ describe('Tick', async () => { }); // should only have one message with a tick notice, the epoch distribution notice is sent separately 
- assert.equal(newEpochTick.Messages.length, 3); + assert.equal(newEpochTick.Messages.length, 4); // updated demand factor is sent assert.equal( newEpochTick.Messages[0].Tags.find((tag) => tag.name === 'Action').value, @@ -464,7 +464,7 @@ describe('Tick', async () => { ); // assert multiple messages are sent given the tick notice, epoch created notice and epoch distribution notice - assert.equal(distributionTick.Messages.length, 4); // 1 epoch distribution notice, 1 epoch created notice, 1 tick notice, 1 demand factor updated notice + assert.equal(distributionTick.Messages.length, 5); // 1 epoch distribution notice, 1 epoch created notice, 1 tick notice, 1 demand factor updated notice // new epoch is created const createdMessage = distributionTick.Messages.find( diff --git a/tests/utils.mjs b/tests/utils.mjs index 28dedf31..a93054ef 100644 --- a/tests/utils.mjs +++ b/tests/utils.mjs @@ -12,8 +12,11 @@ import assert from 'node:assert'; * Loads the aos wasm binary and returns the handle function with program memory * @returns {Promise<{handle: Function, memory: WebAssembly.Memory}>} */ -export async function createAosLoader() { - const handle = await AoLoader(AOS_WASM, AO_LOADER_OPTIONS); +export async function createAosLoader({ + wasm = AOS_WASM, + lua = BUNDLED_SOURCE_CODE, +} = {}) { + const handle = await AoLoader(wasm, AO_LOADER_OPTIONS); const evalRes = await handle( null, { @@ -22,8 +25,9 @@ export async function createAosLoader() { { name: 'Action', value: 'Eval' }, { name: 'Module', value: ''.padEnd(43, '1') }, ], - Data: BUNDLED_SOURCE_CODE, + Data: lua, }, + AO_LOADER_HANDLER_ENV, ); return { diff --git a/tests/vaults.test.mjs b/tests/vaults.test.mjs index ea9d8981..19e3ff1e 100644 --- a/tests/vaults.test.mjs +++ b/tests/vaults.test.mjs @@ -334,7 +334,7 @@ describe('Vaults', async () => { }); // it should create two messages, one for sender and other for recipient - assert.deepEqual(createVaultedTransferResult.Messages.length, 2); + 
assert.deepEqual(createVaultedTransferResult.Messages.length, 3); const senderMessage = createVaultedTransferResult.Messages.find((msg) => msg.Tags.find( @@ -431,7 +431,7 @@ describe('Vaults', async () => { memory: createVaultedTransferResult.Memory, }); - assert.deepEqual(result.Messages.length, 2); + assert.deepEqual(result.Messages.length, 3); const recipientMessageAfterRevoke = result.Messages.find((msg) => msg.Tags.find( (tag) => tag.name === 'Action' && tag.value === 'Revoke-Vault-Notice', diff --git a/tools/constants.mjs b/tools/constants.mjs index c3c25598..e9274837 100644 --- a/tools/constants.mjs +++ b/tools/constants.mjs @@ -23,7 +23,12 @@ export const AO_LOADER_HANDLER_ENV = { Process: { Id: PROCESS_ID, Owner: PROCESS_OWNER, - Tags: [{ name: 'Authority', value: 'XXXXXX' }], + Tags: [ + { + name: 'Authority', + value: 'XXXXXX', + }, + ], }, Module: { Id: PROCESS_ID,