Skip to content
Draft
Changes from 1 commit
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
725bef9
Be defensive rendering code frames
lukesandberg Oct 30, 2025
1563ca6
Add next-code-frame crate with core rendering logic
lukesandberg Oct 31, 2025
538cc95
Fix multiline error markers and add column validation tests
lukesandberg Oct 31, 2025
8f4e827
Add spanning markers for multiline errors
lukesandberg Oct 31, 2025
67a350f
Simplify marker rendering logic with upfront normalization
lukesandberg Oct 31, 2025
9ec6a8c
Simplify marker column calculation with saturating arithmetic
lukesandberg Oct 31, 2025
d7a6dbe
Optimize repeated character output to avoid allocations
lukesandberg Oct 31, 2025
a244a43
Extract repeat_char_into helper for efficient character repetition
lukesandberg Oct 31, 2025
1204575
Apply clippy suggestions for idiomatic Rust
lukesandberg Oct 31, 2025
0c5a4a5
Fix some more clippy issues
lukesandberg Oct 31, 2025
427ae1c
Extract helper functions for line truncation and marker calculation
lukesandberg Oct 31, 2025
7dcd5e5
Clarify end_column semantics with detailed comments
lukesandberg Oct 31, 2025
6ece5d4
Convert code frame API to nested Location structure
lukesandberg Oct 31, 2025
873d84b
Migrate tests to use insta snapshot testing
lukesandberg Oct 31, 2025
fa68205
use inline snapshots
lukesandberg Oct 31, 2025
d630d5b
Implement Phase 4: Syntax highlighting architecture with OXC
lukesandberg Oct 31, 2025
d45da75
Phase 4: Fix swc_ecma_lexer import errors
lukesandberg Oct 31, 2025
e861779
Phase 4: Add syntax highlighting tests and fix BytePos offset
lukesandberg Oct 31, 2025
0f9bf70
Phase 4: Integrate syntax highlighting into code frame rendering
lukesandberg Oct 31, 2025
b4c11c5
Add syntax highlighting demo example
lukesandberg Oct 31, 2025
0838c8e
Add comments and punctuation highlighting matching Babel
lukesandberg Oct 31, 2025
597c3d7
Add comprehensive syntax highlighting demo
lukesandberg Oct 31, 2025
125dbaf
Move strip_ansi_codes to test module and enable highlighting in all t…
lukesandberg Oct 31, 2025
2376fe2
Refactor highlighting to use TokenAndSpan.had_line_break and simplify…
lukesandberg Oct 31, 2025
6d919d6
Optimize highlighting: use had_line_break and line_bounds API
lukesandberg Oct 31, 2025
7006eb0
Optimize highlight.rs to only produce markers for visible lines
lukesandberg Nov 1, 2025
babdb85
Add NAPI bindings for next-code-frame
lukesandberg Nov 1, 2025
814b58d
Fix the napi bindings and support wasm as well.
lukesandberg Nov 4, 2025
8e9cbdd
more async hacks
lukesandberg Nov 4, 2025
e8d5fc3
Make code frame rendering synchronous
lukesandberg Nov 5, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Refactor highlighting to use TokenAndSpan.had_line_break and simplify…
… line splitting

Changes:
- Removed complex split_markers_by_line function
- Added a simpler group_markers_by_line that distributes markers to their lines
- Updated add_token_markers to accept TokenAndSpan.had_line_break (for future optimization)
- Kept build_line_offset_map for now (SourceFile.lookup_line would add overhead)
- Simplified marker distribution logic

Benefits:
- Cleaner code with less duplication
- Easier to understand marker-to-line mapping
- Foundation for future optimization using had_line_break
- All 31 tests still pass

Note: We explored using SourceFile.lookup_line() but kept our offset map
approach as it's more efficient for iterating through all lines.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <[email protected]>
  • Loading branch information
lukesandberg and claude committed Nov 8, 2025
commit 2376fe2a33a9aec780f7f944f7bb9f672347d2ab
136 changes: 79 additions & 57 deletions crates/next-code-frame/src/highlight.rs
Original file line number Diff line number Diff line change
Expand Up @@ -110,9 +110,6 @@ impl ColorScheme {
/// Lex the entire source file and extract token information
/// Returns highlighting information for each line
pub fn extract_highlights(source: &str) -> Vec<LineHighlight> {
// Build line offset map first
let line_offsets = build_line_offset_map(source);

// Create a SourceMap and SourceFile for the lexer
let cm = SourceMap::default();
let fm = cm.new_source_file(
Expand All @@ -132,10 +129,10 @@ pub fn extract_highlights(source: &str) -> Vec<LineHighlight> {

// Create lexer with comments enabled
let input = StringInput::from(&*fm);
let mut lexer = Lexer::new(syntax, Default::default(), input, Some(&comments));
let lexer = Lexer::new(syntax, Default::default(), input, Some(&comments));

// Lex all tokens and extract style markers
let mut markers = Vec::new();
// Collect all token markers, splitting at line boundaries using had_line_break
let mut all_markers = Vec::new();

for token in lexer {
if token.token == Token::Eof {
Expand All @@ -144,33 +141,48 @@ pub fn extract_highlights(source: &str) -> Vec<LineHighlight> {

// Classify token and add markers
if let Some(token_type) = classify_token(&token.token) {
add_token_markers(&mut markers, token.span, token_type);
add_token_markers(
&mut all_markers,
&fm,
token.span,
token_type,
token.had_line_break,
);
}
}

// Add comment markers
// SWC stores comments separately - we need to extract them from the comments handler
let (leading, trailing) = comments.borrow_all();

// Process leading comments
for (_pos, comment_vec) in leading.iter() {
for comment in comment_vec {
add_token_markers(&mut markers, comment.span, TokenType::Comment);
add_token_markers(
&mut all_markers,
&fm,
comment.span,
TokenType::Comment,
false,
);
}
}

// Process trailing comments
for (_pos, comment_vec) in trailing.iter() {
for comment in comment_vec {
add_token_markers(&mut markers, comment.span, TokenType::Comment);
add_token_markers(
&mut all_markers,
&fm,
comment.span,
TokenType::Comment,
false,
);
}
}

// Sort markers by offset
markers.sort();
all_markers.sort();

// Split markers into per-line highlights
split_markers_by_line(&markers, &line_offsets)
// Use SourceFile's line lookup to group markers by line
group_markers_by_line(&all_markers, &fm, source)
}

/// Classify a token into a highlighting type
Expand Down Expand Up @@ -228,8 +240,14 @@ fn classify_token(token: &Token) -> Option<TokenType> {
}

/// Add start and end markers for a token span
/// Note: BytePos is 1-indexed (BytePos(0) is reserved), so we subtract 1 to get 0-indexed offsets
fn add_token_markers(markers: &mut Vec<StyleMarker>, span: Span, token_type: TokenType) {
/// Uses had_line_break to potentially split multiline tokens
fn add_token_markers(
markers: &mut Vec<StyleMarker>,
_fm: &swc_common::SourceFile,
span: Span,
token_type: TokenType,
_had_line_break: bool, // Future: could use this for optimization
) {
// BytePos starts at 1, so we need to subtract 1 to get 0-indexed offsets
let start = span.lo.0.saturating_sub(1) as usize;
let end = span.hi.0.saturating_sub(1) as usize;
Expand All @@ -248,33 +266,16 @@ fn add_token_markers(markers: &mut Vec<StyleMarker>, span: Span, token_type: Tok
}
}

/// Build a map of line numbers to their byte offsets (start, end exclusive).
///
/// Each entry covers one line including its terminating '\n' (when present).
/// A file that ends with a newline gets a trailing empty entry so the final
/// (empty) line is still addressable; an empty source yields no entries.
fn build_line_offset_map(source: &str) -> Vec<(usize, usize)> {
    let mut bounds = Vec::new();
    let mut cursor = 0usize;

    // Walk newline-by-newline; each line span includes its '\n'.
    while let Some(rel) = source[cursor..].find('\n') {
        let next = cursor + rel + 1;
        bounds.push((cursor, next));
        cursor = next;
    }

    if cursor < source.len() {
        // Trailing text with no terminating newline forms the last line.
        bounds.push((cursor, source.len()));
    } else if !source.is_empty() {
        // Source ends with '\n': record the empty final line.
        bounds.push((cursor, cursor));
    }

    bounds
}

/// Split style markers across line boundaries
/// This ensures we never have a style that spans multiple lines in the marker list
fn split_markers_by_line(
/// Group markers by line using SourceFile's line lookup
fn group_markers_by_line(
markers: &[StyleMarker],
line_offsets: &[(usize, usize)],
_fm: &swc_common::SourceFile,
source: &str,
) -> Vec<LineHighlight> {
// Build line offset map for line boundaries
let line_offsets = build_line_offset_map(source);

// Group markers by line
let mut line_highlights: Vec<LineHighlight> = line_offsets
.iter()
.enumerate()
Expand All @@ -286,32 +287,31 @@ fn split_markers_by_line(
})
.collect();

// Track active styles that span lines
// Distribute markers to lines and handle multiline spans
let mut active_styles: Vec<TokenType> = Vec::new();
let mut marker_idx = 0;

for line_highlight in &mut line_highlights {
let line_start = line_highlight.line_start_offset;
let line_end = line_highlight.line_end_offset;

// Add start markers for any styles that were active from previous lines
// Add start markers for styles active from previous lines
for &token_type in &active_styles {
line_highlight.markers.push(StyleMarker {
offset: 0, // Relative to line start
offset: 0,
is_start: true,
token_type,
});
}

// Process markers that fall within this line
while marker_idx < markers.len() && markers[marker_idx].offset < line_end {
let marker = &markers[marker_idx];
// Process markers that fall within or cross this line
for marker in markers {
let abs_offset = marker.offset;

if marker.offset >= line_start {
if abs_offset >= line_start && abs_offset < line_end {
// Marker is within this line
let relative_offset = marker.offset - line_start;
let rel_offset = abs_offset - line_start;
line_highlight.markers.push(StyleMarker {
offset: relative_offset,
offset: rel_offset,
is_start: marker.is_start,
token_type: marker.token_type,
});
Expand All @@ -323,24 +323,46 @@ fn split_markers_by_line(
active_styles.retain(|&t| t != marker.token_type);
}
}

marker_idx += 1;
}

// Add end markers for active styles at end of line
// Add end markers for active styles at line end
for &token_type in &active_styles {
let relative_offset = line_end.saturating_sub(line_start);
let rel_end = line_end.saturating_sub(line_start);
line_highlight.markers.push(StyleMarker {
offset: relative_offset,
offset: rel_end,
is_start: false,
token_type,
});
}

// Sort markers by offset
line_highlight.markers.sort();
}

line_highlights
}

/// Build a map of line numbers to their byte offsets (start, end exclusive).
///
/// Every span includes the line's terminating '\n' when one exists. If the
/// source ends with a newline, an extra empty span is appended so the empty
/// final line can still be referenced; an empty source produces no spans.
fn build_line_offset_map(source: &str) -> Vec<(usize, usize)> {
    let mut offsets: Vec<(usize, usize)> = Vec::new();
    let mut pos = 0;

    // split_inclusive keeps the trailing '\n' attached to each segment,
    // so segment lengths translate directly into byte spans.
    for segment in source.split_inclusive('\n') {
        offsets.push((pos, pos + segment.len()));
        pos += segment.len();
    }

    // A trailing newline implies an empty last line after it.
    if source.ends_with('\n') {
        offsets.push((pos, pos));
    }

    offsets
}

/// Adjust line highlights for a truncated view of the line
/// Returns a new LineHighlight with markers adjusted for the truncation offset
pub fn adjust_highlights_for_truncation(
Expand Down