48 | 48 | entry_notifier_service::EntryNotifierSender, |
49 | 49 | leader_schedule_cache::LeaderScheduleCache, |
50 | 50 | leader_schedule_utils::first_of_consecutive_leader_slots, |
51 | | - shred::{ErasureSetId, DATA_SHREDS_PER_FEC_BLOCK}, |
| 51 | + shred::DATA_SHREDS_PER_FEC_BLOCK, |
52 | 52 | }, |
53 | 53 | solana_measure::measure::Measure, |
54 | 54 | solana_poh::poh_recorder::{PohLeaderStatus, PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS}, |
@@ -2898,43 +2898,60 @@ impl ReplayStage { |
2898 | 2898 | (bank.slot(), bank.hash()), |
2899 | 2899 | Some((bank.parent_slot(), bank.parent_hash())), |
2900 | 2900 | ); |
2901 | | - // If the block does not have at least 64 shreds in the last FEC set, mark |
2902 | | - // it as invalid, effectively removing it from fork choice. |
2903 | | - let mut last_fec_set_too_small = true; |
2904 | | - let slot_meta = blockstore |
2905 | | - .meta(bank.slot()) |
2906 | | - .expect("Slot meta get must succeed on frozen banks") |
2907 | | - .expect("Slot meta must exist during freeze"); |
2908 | | - if let Some(last_shred_index) = slot_meta.last_index { |
2909 | | - if let Ok(Some(erasure_meta)) = blockstore.erasure_meta(ErasureSetId::new( |
2910 | | - bank.slot(), |
2911 | | - u32::try_from(last_shred_index).expect("LAST_SHRED_IN_SLOT should be u32"), |
2912 | | - )) { |
2913 | | - if erasure_meta.total_shreds() >= 2 * DATA_SHREDS_PER_FEC_BLOCK { |
2914 | | - last_fec_set_too_small = false; |
2915 | | - } |
2916 | | - } |
2917 | | - } |
2918 | | - // If there is no erasure meta then we have not received a coding shred for this |
2919 | | - // fec set. If there is no `slot_meta.last_index` then we should not be freezing |
2920 | | - // the bank. There is already a duplicate check ensuring `LAST_SHRED_IN_SLOT` is |
2921 | | - // consistent. At this point if `last_fec_set_too_small` is `true`, then the
2922 | | - // leader has sent less than 2 * DATA_SHREDS_PER_FEC_BLOCK shreds in the last fec set, |
2923 | | - // meaning we can disregard this slot |
2924 | | - if last_fec_set_too_small { |
2925 | | - heaviest_subtree_fork_choice |
2926 | | - .mark_fork_invalid_candidate(&(bank.slot(), bank.hash())); |
2927 | | - } |
2928 | 2901 |
2929 | 2902 | bank_progress.fork_stats.bank_hash = Some(bank.hash()); |
2930 | | - let bank_frozen_state = BankFrozenState::new_from_state( |
| 2903 | + let mut bank_frozen_state = BankFrozenState::new_from_state( |
2931 | 2904 | bank.slot(), |
2932 | 2905 | bank.hash(), |
2933 | 2906 | duplicate_slots_tracker, |
2934 | 2907 | duplicate_confirmed_slots, |
2935 | 2908 | heaviest_subtree_fork_choice, |
2936 | 2909 | epoch_slots_frozen_slots, |
2937 | 2910 | ); |
| 2911 | + |
| 2912 | + if bank |
| 2913 | + .feature_set |
| 2914 | + .is_active(&solana_sdk::feature_set::vote_only_full_fec_sets::id()) |
| 2915 | + { |
| 2916 | + let mut last_fec_set_too_small = true; |
| 2917 | + if let Some((fec_set_index, shred_index)) = |
| 2918 | + blockstore.get_last_shred_indices(bank.slot()) |
| 2919 | + { |
| 2920 | + // We need to check that the last FEC set contains at least `DATA_SHREDS_PER_FEC_BLOCK` data shreds.
| 2921 | + // Since we froze the slot we know that the data shreds are connected, so we can offset back from the
| 2922 | + // last data shred index to determine the size of the last FEC set.
| 2923 | + // offset_index = shred_index - (DATA_SHREDS_PER_FEC_BLOCK - 1) |
| 2924 | + let offset_index = u32::try_from(DATA_SHREDS_PER_FEC_BLOCK) |
| 2925 | + .unwrap() |
| 2926 | + .checked_sub(1) |
| 2927 | + .and_then(|offset| shred_index.checked_sub(offset)); |
| 2928 | + if let Some(offset_index) = offset_index { |
| 2929 | + if let Some(data_shred_fec_set_index) = |
| 2930 | + blockstore.get_data_shred_fec_set_index(bank.slot(), offset_index) |
| 2931 | + { |
| 2932 | + if fec_set_index == data_shred_fec_set_index { |
| 2933 | + // This implies that the last FEC set contains at least `DATA_SHREDS_PER_FEC_BLOCK` data shreds.
| 2934 | + // Since we have reached the max tick height, if there are more data shreds we have not yet received,
| 2935 | + // this block will be marked dead/invalid once they are received.
| 2936 | + // |
| 2937 | + // Under this assumption, we must have recovered the remaining `DATA_SHREDS_PER_FEC_BLOCK+` coding |
| 2938 | + // shreds as well, and the FEC set contains at least `DATA_SHREDS_PER_FEC_BLOCK`. |
| 2939 | + // TODO: Recovery is only possible if we receive at least 1 coding shred. For the 32 data / 0 coding
| 2940 | + // case we need to wait until 1 coding shred has been received before continuing. We can add a separate
| 2941 | + // check here, or address this as part of the IP verification, which also needs to wait for 33/64.
| 2942 | + last_fec_set_too_small = false; |
| 2943 | + } |
| 2944 | + } |
| 2945 | + } |
| 2946 | + } |
| 2947 | + |
| 2948 | + if last_fec_set_too_small { |
| 2949 | + // If the block does not have at least 2 * DATA_SHREDS_PER_FEC_BLOCK shreds in the last FEC set, treat it |
| 2950 | + // as duplicate, effectively removing it from fork choice. |
| 2951 | + bank_frozen_state.mark_duplicate(); |
| 2952 | + } |
| 2953 | + } |
| 2954 | + |
2938 | 2955 | check_slot_agrees_with_cluster( |
2939 | 2956 | bank.slot(), |
2940 | 2957 | bank_forks.read().unwrap().root(), |
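
The new check boils down to a small piece of index arithmetic: because the data shreds of a frozen slot are connected, the data shred at index `last_shred_index - (DATA_SHREDS_PER_FEC_BLOCK - 1)` falls inside the last FEC set exactly when that set holds at least `DATA_SHREDS_PER_FEC_BLOCK` data shreds. The sketch below isolates that arithmetic from the blockstore; the helper name `last_fec_set_big_enough`, the closure standing in for the `get_data_shred_fec_set_index` lookup, and the hard-coded value 32 for `DATA_SHREDS_PER_FEC_BLOCK` are illustrative assumptions, not the actual `Blockstore` API.

```rust
// Minimal sketch of the last-FEC-set size check, assuming
// DATA_SHREDS_PER_FEC_BLOCK is 32 and using a closure in place of the real
// blockstore lookups (get_last_shred_indices / get_data_shred_fec_set_index).
const DATA_SHREDS_PER_FEC_BLOCK: u32 = 32;

/// Returns true when the FEC set holding the slot's last data shred contains
/// at least DATA_SHREDS_PER_FEC_BLOCK data shreds.
///
/// `last_shred_index` and `last_fec_set_index` describe the final data shred;
/// `fec_set_index_of` returns the FEC set index of an earlier data shred.
fn last_fec_set_big_enough(
    last_shred_index: u32,
    last_fec_set_index: u32,
    fec_set_index_of: impl Fn(u32) -> Option<u32>,
) -> bool {
    // Step back DATA_SHREDS_PER_FEC_BLOCK - 1 data shreds. Since the slot is
    // frozen, its data shreds are connected, so if that earlier shred is still
    // in the same FEC set, the set holds at least DATA_SHREDS_PER_FEC_BLOCK
    // data shreds.
    let Some(offset_index) = last_shred_index.checked_sub(DATA_SHREDS_PER_FEC_BLOCK - 1) else {
        // Fewer than DATA_SHREDS_PER_FEC_BLOCK data shreds exist in the slot.
        return false;
    };
    fec_set_index_of(offset_index) == Some(last_fec_set_index)
}

fn main() {
    // Toy layout: data shreds grouped into FEC sets of 32, so a shred's FEC
    // set index is its index rounded down to a multiple of 32.
    let fec_set_index_of = |shred_index: u32| Some((shred_index / 32) * 32);

    // Last shred index 95 sits in the set starting at 64; 95 - 31 = 64 is in
    // the same set, so the last set is full and the block stays a candidate.
    assert!(last_fec_set_big_enough(95, 64, fec_set_index_of));

    // If the slot ended at shred 80 instead, 80 - 31 = 49 belongs to the
    // previous set (32), so the last set has only 17 data shreds and the
    // block would be marked duplicate.
    assert!(!last_fec_set_big_enough(80, 64, fec_set_index_of));
}
```

In the patch itself, this boolean only matters when the `vote_only_full_fec_sets` feature is active: a failing check calls `bank_frozen_state.mark_duplicate()`, so the frozen bank is treated as duplicate and effectively removed from fork choice.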
|