use crate::core::errors::DataProfilerError;
use std::collections::HashMap;
// ============================================================================
// Source-Agnostic Data Source Types
// ============================================================================
/// Supported file formats for data profiling
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum FileFormat {
Csv,
Json,
Jsonl,
Parquet,
#[serde(untagged)]
Unknown(String),
}
impl std::fmt::Display for FileFormat {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Csv => write!(f, "csv"),
Self::Json => write!(f, "json"),
Self::Jsonl => write!(f, "jsonl"),
Self::Parquet => write!(f, "parquet"),
Self::Unknown(s) => write!(f, "{}", s),
}
}
}
/// Supported query engines for SQL-based profiling
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum QueryEngine {
DataFusion,
Postgres,
MySql,
Sqlite,
Snowflake,
BigQuery,
#[serde(untagged)]
Custom(String),
}
impl std::fmt::Display for QueryEngine {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::DataFusion => write!(f, "datafusion"),
Self::Postgres => write!(f, "postgres"),
Self::MySql => write!(f, "mysql"),
Self::Sqlite => write!(f, "sqlite"),
Self::Snowflake => write!(f, "snowflake"),
Self::BigQuery => write!(f, "bigquery"),
Self::Custom(s) => write!(f, "{}", s),
}
}
}
/// Source library for in-memory DataFrame profiling
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum DataFrameLibrary {
Pandas,
Polars,
PyArrow,
#[serde(untagged)]
Custom(String),
}
impl std::fmt::Display for DataFrameLibrary {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Pandas => write!(f, "pandas"),
Self::Polars => write!(f, "polars"),
Self::PyArrow => write!(f, "pyarrow"),
Self::Custom(s) => write!(f, "{}", s),
}
}
}
/// Supported stream source systems
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum StreamSourceSystem {
Kafka,
Kinesis,
Pulsar,
Http, // For REST API ingestion
WebSocket, // For WS ingestion
#[serde(untagged)]
Custom(String),
}
impl std::fmt::Display for StreamSourceSystem {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Kafka => write!(f, "kafka"),
Self::Kinesis => write!(f, "kinesis"),
Self::Pulsar => write!(f, "pulsar"),
Self::Http => write!(f, "http"),
Self::WebSocket => write!(f, "websocket"),
Self::Custom(s) => write!(f, "{}", s),
}
}
}
/// Source-agnostic data source metadata
///
/// Supports multiple data source types with proper semantics:
/// - Files: CSV, JSON, Parquet with path, size, and format metadata
/// - Queries: SQL queries with engine, statement, and execution metadata
/// - DataFrames: In-memory pandas/polars/pyarrow via PyCapsule
/// - Streams: Streaming sources with topic, partition, and batch tracking
///
/// # JSON Serialization
/// Uses tagged enum format for clean API output:
/// ```json
/// { "type": "file", "path": "/data/users.csv", "format": "csv", ... }
/// { "type": "query", "engine": "duckdb", "statement": "SELECT ...", ... }
/// { "type": "dataframe", "name": "sales", "source_library": "pandas", ... }
/// { "type": "stream", "topic": "events", "batch_id": "b1", "source_system": "kafka", ... }
/// ```
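///
/// # Example
/// A minimal serialization sketch (marked `ignore` because the crate/module
/// path needed by a doctest is an assumption, not confirmed by this file):
/// ```ignore
/// let source = DataSource::File {
///     path: "/data/users.csv".into(),
///     format: FileFormat::Csv,
///     size_bytes: 2_048,
///     modified_at: None,
///     parquet_metadata: None,
/// };
/// // `tag = "type"` produces {"type":"file",...}; `None` fields are skipped.
/// let json = serde_json::to_string(&source).unwrap();
/// assert!(json.contains("\"type\":\"file\""));
/// ```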
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum DataSource {
/// File-based data source (CSV, JSON, Parquet, etc.)
File {
/// Absolute or relative path to the file
path: String,
/// Detected or specified file format
format: FileFormat,
/// File size in bytes
size_bytes: u64,
/// Last modification timestamp (ISO 8601 / RFC 3339)
#[serde(skip_serializing_if = "Option::is_none")]
modified_at: Option<String>,
/// Parquet-specific metadata (only present for Parquet files)
#[serde(skip_serializing_if = "Option::is_none")]
parquet_metadata: Option<ParquetMetadata>,
},
/// SQL query-based data source
Query {
/// Database engine used for the query
engine: QueryEngine,
/// SQL statement executed
statement: String,
/// Target database name (if applicable)
#[serde(skip_serializing_if = "Option::is_none")]
database: Option<String>,
/// Unique execution identifier for tracing
#[serde(skip_serializing_if = "Option::is_none")]
execution_id: Option<String>,
},
/// In-memory DataFrame source (pandas/polars via PyCapsule)
#[serde(rename = "dataframe")]
DataFrame {
/// User-provided name for identification
name: String,
/// Source library (pandas, polars, pyarrow)
source_library: DataFrameLibrary,
/// Number of rows at profiling time
row_count: usize,
/// Number of columns
column_count: usize,
/// Memory usage in bytes (if available)
#[serde(skip_serializing_if = "Option::is_none")]
memory_bytes: Option<u64>,
},
/// Streaming data source
Stream {
/// Stream identifier (e.g., Kafka topic, Kinesis stream name)
topic: String,
/// Batch identifier for ordering and deduplication
batch_id: String,
/// Partition for parallel processing (optional)
#[serde(skip_serializing_if = "Option::is_none")]
partition: Option<u32>,
/// Consumer group for Kafka-style coordination (optional)
#[serde(skip_serializing_if = "Option::is_none")]
consumer_group: Option<String>,
/// Source system identifier (kafka, kinesis, pulsar, http, etc.)
source_system: StreamSourceSystem,
/// Session ID for multi-tenant scenarios
#[serde(skip_serializing_if = "Option::is_none")]
session_id: Option<String>,
/// Timestamp of first record in batch (ISO 8601)
#[serde(skip_serializing_if = "Option::is_none")]
first_record_at: Option<String>,
/// Timestamp of last record in batch (ISO 8601)
#[serde(skip_serializing_if = "Option::is_none")]
last_record_at: Option<String>,
},
}
impl DataSource {
/// Get a human-readable identifier for this data source
///
/// Returns:
/// - For files: the file path
/// - For queries: "engine: truncated_statement"
/// - For dataframes: `library[name]`
/// - For streams: `system[topic]-batch:id`
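///
/// # Example
/// Illustrative sketch (module path assumed):
/// ```ignore
/// let df = DataSource::DataFrame {
///     name: "sales".into(),
///     source_library: DataFrameLibrary::Pandas,
///     row_count: 10,
///     column_count: 3,
///     memory_bytes: None,
/// };
/// assert_eq!(df.identifier(), "pandas[sales]");
/// ```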
pub fn identifier(&self) -> String {
match self {
Self::File { path, .. } => path.clone(),
Self::Query {
engine, statement, ..
} => {
// Truncate on a char boundary so multi-byte UTF-8 statements
// cannot cause a byte-slice panic.
let truncated = if statement.chars().count() > 50 {
let prefix: String = statement.chars().take(47).collect();
format!("{}...", prefix)
} else {
statement.clone()
};
format!("{}: {}", engine, truncated)
}
Self::DataFrame {
name,
source_library,
..
} => format!("{}[{}]", source_library, name),
Self::Stream {
source_system,
topic,
batch_id,
..
} => format!("{}[{}]-batch:{}", source_system, topic, batch_id),
}
}
/// Get the data size in megabytes: file size for file-based sources,
/// in-memory size for DataFrames, `None` for queries and streams
pub fn size_mb(&self) -> Option<f64> {
match self {
Self::File { size_bytes, .. } => Some(*size_bytes as f64 / 1_048_576.0),
Self::DataFrame { memory_bytes, .. } => memory_bytes.map(|b| b as f64 / 1_048_576.0),
Self::Query { .. } | Self::Stream { .. } => None,
}
}
/// Check if this is a file-based source
pub fn is_file(&self) -> bool {
matches!(self, Self::File { .. })
}
/// Check if this is a query-based source
pub fn is_query(&self) -> bool {
matches!(self, Self::Query { .. })
}
/// Check if this is a DataFrame-based source
pub fn is_dataframe(&self) -> bool {
matches!(self, Self::DataFrame { .. })
}
/// Check if this is a Stream-based source
pub fn is_stream(&self) -> bool {
matches!(self, Self::Stream { .. })
}
/// Get the file path if this is a file-based source
pub fn file_path(&self) -> Option<&str> {
match self {
Self::File { path, .. } => Some(path),
_ => None,
}
}
/// Get the stream topic if this is a stream-based source
pub fn stream_topic(&self) -> Option<&str> {
match self {
Self::Stream { topic, .. } => Some(topic),
_ => None,
}
}
/// Get the batch ID if this is a stream-based source
pub fn batch_id(&self) -> Option<&str> {
match self {
Self::Stream { batch_id, .. } => Some(batch_id),
_ => None,
}
}
}
// ============================================================================
// ISO 25012 Quality Dimension Enum
// ============================================================================
/// ISO 25012 quality dimensions that can be selectively requested.
///
/// When no specific dimensions are requested (the default), all dimensions
/// are computed. Passing a subset enables "lazy metric packs" — only the
/// requested calculators run, saving CPU/memory for large datasets.
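///
/// # Example
/// Dimension names parse case-insensitively via `FromStr` (illustrative
/// sketch; module path assumed):
/// ```ignore
/// let dims: Vec<QualityDimension> = ["Completeness", "uniqueness"]
///     .iter()
///     .map(|s| s.parse().unwrap())
///     .collect();
/// assert_eq!(
///     dims,
///     vec![QualityDimension::Completeness, QualityDimension::Uniqueness]
/// );
/// ```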
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum QualityDimension {
Completeness,
Consistency,
Uniqueness,
Accuracy,
Timeliness,
}
impl QualityDimension {
/// All currently implemented dimensions.
pub fn all() -> Vec<Self> {
vec![
Self::Completeness,
Self::Consistency,
Self::Uniqueness,
Self::Accuracy,
Self::Timeliness,
]
}
}
impl std::str::FromStr for QualityDimension {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_lowercase().as_str() {
"completeness" => Ok(Self::Completeness),
"consistency" => Ok(Self::Consistency),
"uniqueness" => Ok(Self::Uniqueness),
"accuracy" => Ok(Self::Accuracy),
"timeliness" => Ok(Self::Timeliness),
_ => Err(format!("Unknown quality dimension: {s}")),
}
}
}
impl std::fmt::Display for QualityDimension {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Completeness => write!(f, "completeness"),
Self::Consistency => write!(f, "consistency"),
Self::Uniqueness => write!(f, "uniqueness"),
Self::Accuracy => write!(f, "accuracy"),
Self::Timeliness => write!(f, "timeliness"),
}
}
}
// ============================================================================
// Per-Dimension Metric Sub-Structs (ISO 25012 "Metric Packs")
// ============================================================================
/// Completeness metrics (ISO 8000-8)
#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)]
pub struct CompletenessMetrics {
/// Percentage of missing values across all cells
#[serde(serialize_with = "crate::serde_helpers::round_2")]
pub missing_values_ratio: f64,
/// Percentage of rows with no null values
#[serde(serialize_with = "crate::serde_helpers::round_2")]
pub complete_records_ratio: f64,
/// Columns with more than the threshold of null values
pub null_columns: Vec<String>,
}
/// Consistency metrics (ISO 8000-61)
#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)]
pub struct ConsistencyMetrics {
/// Percentage of values conforming to expected data type
#[serde(serialize_with = "crate::serde_helpers::round_2")]
pub data_type_consistency: f64,
/// Number of format violations (e.g., malformed dates)
pub format_violations: usize,
/// Number of UTF-8 encoding issues detected
pub encoding_issues: usize,
}
/// Uniqueness metrics (ISO 8000-110)
#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)]
pub struct UniquenessMetrics {
/// Number of exact duplicate rows
pub duplicate_rows: usize,
/// Percentage of unique values in key columns (if applicable)
#[serde(serialize_with = "crate::serde_helpers::round_2")]
pub key_uniqueness: f64,
/// Warning flag for columns with excessive unique values
pub high_cardinality_warning: bool,
}
/// Accuracy metrics (ISO 25012)
#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)]
pub struct AccuracyMetrics {
/// Percentage of statistically anomalous values (outliers)
#[serde(serialize_with = "crate::serde_helpers::round_2")]
pub outlier_ratio: f64,
/// Number of values outside expected ranges
pub range_violations: usize,
/// Number of negative values in positive-only fields (e.g., age)
pub negative_values_in_positive: usize,
}
/// Timeliness metrics (ISO 8000-8)
#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)]
pub struct TimelinessMetrics {
/// Number of future dates detected (dates beyond current date)
pub future_dates_count: usize,
/// Percentage of dates older than staleness threshold (e.g., >5 years)
#[serde(serialize_with = "crate::serde_helpers::round_2")]
pub stale_data_ratio: f64,
/// Temporal ordering violations (e.g., end_date < start_date)
pub temporal_violations: usize,
}
// ============================================================================
// Composable QualityMetrics (opt-in dimensions)
// ============================================================================
/// Comprehensive data quality metrics following industry standards.
///
/// Each ISO 25012 dimension is an `Option` — dimensions that were not requested
/// (or not computed) are `None`. When all dimensions are requested (the default),
/// all fields are `Some`.
///
/// This composable design enables "lazy metric packs": the engine only computes
/// the dimensions explicitly requested by the user, saving CPU/memory.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct QualityMetrics {
/// Completeness dimension (ISO 8000-8)
#[serde(skip_serializing_if = "Option::is_none")]
pub completeness: Option<CompletenessMetrics>,
/// Consistency dimension (ISO 8000-61)
#[serde(skip_serializing_if = "Option::is_none")]
pub consistency: Option<ConsistencyMetrics>,
/// Uniqueness dimension (ISO 8000-110)
#[serde(skip_serializing_if = "Option::is_none")]
pub uniqueness: Option<UniquenessMetrics>,
/// Accuracy dimension (ISO 25012)
#[serde(skip_serializing_if = "Option::is_none")]
pub accuracy: Option<AccuracyMetrics>,
/// Timeliness dimension (ISO 8000-8)
#[serde(skip_serializing_if = "Option::is_none")]
pub timeliness: Option<TimelinessMetrics>,
}
impl QualityMetrics {
/// Create metrics for an empty dataset (perfect quality, no data).
/// All dimensions are populated with default "perfect" values.
pub fn empty() -> Self {
Self {
completeness: Some(CompletenessMetrics {
missing_values_ratio: 0.0,
complete_records_ratio: 100.0,
null_columns: vec![],
}),
consistency: Some(ConsistencyMetrics {
data_type_consistency: 100.0,
format_violations: 0,
encoding_issues: 0,
}),
uniqueness: Some(UniquenessMetrics {
duplicate_rows: 0,
key_uniqueness: 100.0,
high_cardinality_warning: false,
}),
accuracy: Some(AccuracyMetrics {
outlier_ratio: 0.0,
range_violations: 0,
negative_values_in_positive: 0,
}),
timeliness: Some(TimelinessMetrics {
future_dates_count: 0,
stale_data_ratio: 0.0,
temporal_violations: 0,
}),
}
}
/// Calculate comprehensive data quality metrics from column data.
///
/// Delegates to the specialized MetricsCalculator for proper separation of concerns.
/// Uses default ISO 8000/25012 thresholds. Computes all dimensions.
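///
/// Illustrative sketch (`column_profiles` elided; module path assumed):
/// ```ignore
/// use std::collections::HashMap;
///
/// let mut data: HashMap<String, Vec<String>> = HashMap::new();
/// data.insert("age".into(), vec!["34".into(), String::new()]);
/// let metrics = QualityMetrics::calculate_from_data(&data, &column_profiles)?;
/// println!("overall score: {:.1}", metrics.overall_score());
/// ```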
pub fn calculate_from_data(
data: &HashMap<String, Vec<String>>,
column_profiles: &[ColumnProfile],
) -> Result<Self, DataProfilerError> {
let calculator = crate::analysis::MetricsCalculator::new();
calculator.calculate_comprehensive_metrics(data, column_profiles, None)
}
/// Calculate overall quality score (0-100) based on ISO 8000/25012 dimensions.
///
/// Weighted formula (only computed dimensions contribute):
/// - Completeness: 30% (complete_records_ratio)
/// - Consistency: 25% (data_type_consistency)
/// - Uniqueness: 20% (key_uniqueness)
/// - Accuracy: 15% (100 - outlier_ratio)
/// - Timeliness: 10% (100 - stale_data_ratio)
///
/// When some dimensions are `None`, the weights of computed dimensions
/// are re-normalized so the score is still on a 0–100 scale.
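///
/// For example, with only completeness (90.0) and uniqueness (100.0)
/// computed, the raw weighted sum is `90.0 * 0.3 + 100.0 * 0.2 = 47.0`
/// over a combined weight of `0.5`, giving a score of `47.0 / 0.5 = 94.0`.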
pub fn overall_score(&self) -> f64 {
let mut total_weight = 0.0;
let mut score = 0.0;
if let Some(c) = &self.completeness {
total_weight += 0.3;
score += c.complete_records_ratio * 0.3;
}
if let Some(c) = &self.consistency {
total_weight += 0.25;
score += c.data_type_consistency * 0.25;
}
if let Some(u) = &self.uniqueness {
total_weight += 0.2;
score += u.key_uniqueness * 0.2;
}
if let Some(a) = &self.accuracy {
total_weight += 0.15;
score += (100.0 - a.outlier_ratio) * 0.15;
}
if let Some(t) = &self.timeliness {
total_weight += 0.1;
score += (100.0 - t.stale_data_ratio) * 0.1;
}
if total_weight > 0.0 {
(score / total_weight).min(100.0)
} else {
0.0
}
}
// -- Convenience accessors for backward compatibility --
/// Missing values ratio (from completeness dimension, 0.0 if not computed)
pub fn missing_values_ratio(&self) -> f64 {
self.completeness
.as_ref()
.map_or(0.0, |c| c.missing_values_ratio)
}
/// Complete records ratio (from completeness dimension, 100.0 if not computed)
pub fn complete_records_ratio(&self) -> f64 {
self.completeness
.as_ref()
.map_or(100.0, |c| c.complete_records_ratio)
}
/// Null columns (from completeness dimension, empty if not computed)
pub fn null_columns(&self) -> &[String] {
self.completeness.as_ref().map_or(&[], |c| &c.null_columns)
}
/// Data type consistency (from consistency dimension, 100.0 if not computed)
pub fn data_type_consistency(&self) -> f64 {
self.consistency
.as_ref()
.map_or(100.0, |c| c.data_type_consistency)
}
/// Format violations (from consistency dimension, 0 if not computed)
pub fn format_violations(&self) -> usize {
self.consistency.as_ref().map_or(0, |c| c.format_violations)
}
/// Encoding issues (from consistency dimension, 0 if not computed)
pub fn encoding_issues(&self) -> usize {
self.consistency.as_ref().map_or(0, |c| c.encoding_issues)
}
/// Duplicate rows (from uniqueness dimension, 0 if not computed)
pub fn duplicate_rows(&self) -> usize {
self.uniqueness.as_ref().map_or(0, |u| u.duplicate_rows)
}
/// Key uniqueness (from uniqueness dimension, 100.0 if not computed)
pub fn key_uniqueness(&self) -> f64 {
self.uniqueness.as_ref().map_or(100.0, |u| u.key_uniqueness)
}
/// High cardinality warning (from uniqueness dimension, false if not computed)
pub fn high_cardinality_warning(&self) -> bool {
self.uniqueness
.as_ref()
.is_some_and(|u| u.high_cardinality_warning)
}
/// Outlier ratio (from accuracy dimension, 0.0 if not computed)
pub fn outlier_ratio(&self) -> f64 {
self.accuracy.as_ref().map_or(0.0, |a| a.outlier_ratio)
}
/// Range violations (from accuracy dimension, 0 if not computed)
pub fn range_violations(&self) -> usize {
self.accuracy.as_ref().map_or(0, |a| a.range_violations)
}
/// Negative values in positive fields (from accuracy dimension, 0 if not computed)
pub fn negative_values_in_positive(&self) -> usize {
self.accuracy
.as_ref()
.map_or(0, |a| a.negative_values_in_positive)
}
/// Future dates count (from timeliness dimension, 0 if not computed)
pub fn future_dates_count(&self) -> usize {
self.timeliness.as_ref().map_or(0, |t| t.future_dates_count)
}
/// Stale data ratio (from timeliness dimension, 0.0 if not computed)
pub fn stale_data_ratio(&self) -> f64 {
self.timeliness.as_ref().map_or(0.0, |t| t.stale_data_ratio)
}
/// Temporal violations (from timeliness dimension, 0 if not computed)
pub fn temporal_violations(&self) -> usize {
self.timeliness
.as_ref()
.map_or(0, |t| t.temporal_violations)
}
}
/// Confidence level for quality metrics — indicates whether metrics were
/// computed from the full dataset, a bounded sample, or a mix of both.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub enum MetricConfidence {
/// All metrics computed from the full dataset (exact)
Exact,
/// Metrics computed from a bounded sample (reservoir/HyperLogLog)
Approximate {
sample_size: usize,
population_size: Option<usize>,
},
/// Mix of exact stream counters (e.g., completeness from Welford)
/// and sampled metrics (e.g., uniqueness from HyperLogLog)
Mixed {
exact_dimensions: Vec<String>,
sampled_dimensions: Vec<String>,
sample_size: usize,
},
}
/// Wraps quality metrics with confidence information.
///
/// This replaces the former mandatory `DataQualityMetrics` field on reports,
/// adding information about how trustworthy each metric dimension is.
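///
/// # Example
/// Wrapping sampled metrics (illustrative sketch; module path assumed):
/// ```ignore
/// let assessment = QualityAssessment::approximate(
///     QualityMetrics::empty(),
///     10_000,          // rows actually sampled
///     Some(1_000_000), // known population size
/// );
/// // `empty()` populates every dimension with perfect values.
/// assert_eq!(assessment.score(), 100.0);
/// ```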
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct QualityAssessment {
/// The underlying quality metrics (ISO 8000/25012)
pub metrics: QualityMetrics,
/// How the metrics were computed (exact, approximate, or mixed)
pub confidence: MetricConfidence,
}
impl QualityAssessment {
/// Create a new QualityAssessment with Exact confidence (full dataset)
pub fn exact(metrics: QualityMetrics) -> Self {
Self {
metrics,
confidence: MetricConfidence::Exact,
}
}
/// Create a new QualityAssessment with Approximate confidence (sampled)
pub fn approximate(
metrics: QualityMetrics,
sample_size: usize,
population_size: Option<usize>,
) -> Self {
Self {
metrics,
confidence: MetricConfidence::Approximate {
sample_size,
population_size,
},
}
}
/// Calculate overall quality score (0-100) using ISO 8000/25012 dimensions
pub fn score(&self) -> f64 {
self.metrics.overall_score()
}
}
impl From<QualityMetrics> for QualityAssessment {
/// Convert bare metrics into an assessment assuming exact confidence.
fn from(metrics: QualityMetrics) -> Self {
Self::exact(metrics)
}
}
/// Complete profiling report for a data source.
///
/// Contains column-level statistics, execution metadata, and an optional
/// ISO 8000/25012 quality assessment. This is the primary output of all
/// profiling operations (`Profiler::analyze_file`, `Profiler::analyze_source`,
/// `Profiler::profile_stream`, etc.).
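///
/// # Example
/// Deterministic construction for caching or golden-file tests (illustrative
/// sketch; `source`, `profiles`, and `execution` are elided inputs):
/// ```ignore
/// let report = ProfileReport::new(source, profiles, execution, None)
///     .with_id("report-001")
///     .with_timestamp("2024-01-01T00:00:00Z");
/// assert!(report.quality_score().is_none());
/// ```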
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ProfileReport {
/// Unique identifier for this report (UUID v4)
pub id: String,
/// Timestamp when the report was generated (ISO 8601 / RFC 3339)
pub timestamp: String,
/// Data source metadata (file, query, etc.)
pub data_source: DataSource,
/// Column-level profiling results
pub column_profiles: Vec<ColumnProfile>,
/// Execution metadata (timing, rows processed, truncation info, etc.)
#[serde(alias = "scan_info")]
pub execution: ExecutionMetadata,
/// Data quality assessment (optional — partial analysis may skip quality)
#[serde(
alias = "data_quality_metrics",
skip_serializing_if = "Option::is_none",
default,
deserialize_with = "deserialize_quality_compat"
)]
pub quality: Option<QualityAssessment>,
}
impl ProfileReport {
/// Create a new ProfileReport with auto-generated id and timestamp
pub fn new(
data_source: DataSource,
column_profiles: Vec<ColumnProfile>,
execution: ExecutionMetadata,
quality: Option<QualityAssessment>,
) -> Self {
Self {
id: uuid::Uuid::new_v4().to_string(),
timestamp: chrono::Utc::now().to_rfc3339(),
data_source,
column_profiles,
execution,
quality,
}
}
/// Override the auto-generated ID (useful for deterministic caching/testing)
pub fn with_id(mut self, id: impl Into<String>) -> Self {
self.id = id.into();
self
}
/// Override the auto-generated timestamp
pub fn with_timestamp(mut self, timestamp: impl Into<String>) -> Self {
self.timestamp = timestamp.into();
self
}
/// Calculate overall quality score using ISO 8000/25012 metrics.
/// Returns `None` if quality metrics were not computed.
pub fn quality_score(&self) -> Option<f64> {
self.quality.as_ref().map(|q| q.score())
}
/// Get the data source identifier (for backwards compatibility)
pub fn source_identifier(&self) -> String {
self.data_source.identifier()
}
}
/// Custom deserializer that handles both legacy `DataQualityMetrics` (flat)
/// and new `QualityAssessment` (wrapped with confidence) JSON formats.
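///
/// Both shapes are accepted (illustrative JSON, fields elided with `...`):
/// ```json
/// { "metrics": { ... }, "confidence": "Exact" }
/// { "completeness": { ... }, "uniqueness": { ... } }
/// ```
/// The first matches the new wrapped format; the second is treated as
/// legacy bare metrics and wrapped with `MetricConfidence::Exact`.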
fn deserialize_quality_compat<'de, D>(
deserializer: D,
) -> Result<Option<QualityAssessment>, D::Error>
where
D: serde::Deserializer<'de>,
{
use serde::Deserialize;
// Try to deserialize as the new QualityAssessment first,
// then fall back to bare QualityMetrics (legacy format)
let value: Option<serde_json::Value> = Option::deserialize(deserializer)?;
match value {
None => Ok(None),
Some(v) => {
// Try new format first (has "metrics" and "confidence" fields)
if v.get("metrics").is_some() && v.get("confidence").is_some() {
let assessment: QualityAssessment =
serde_json::from_value(v).map_err(serde::de::Error::custom)?;
Ok(Some(assessment))
} else {
// Legacy format: bare QualityMetrics object
let metrics: QualityMetrics =
serde_json::from_value(v).map_err(serde::de::Error::custom)?;
Ok(Some(QualityAssessment::exact(metrics)))
}
}
}
}
/// Metadata specific to Parquet files
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ParquetMetadata {
/// Number of row groups in the Parquet file
pub num_row_groups: usize,
/// Compression codec used (e.g., "SNAPPY", "GZIP", "ZSTD", "UNCOMPRESSED")
pub compression: String,
/// Parquet format version (e.g., 1 or 2)
pub version: i32,
/// Arrow schema as string representation
pub schema_summary: String,
/// Total compressed size in bytes
pub compressed_size_bytes: u64,
/// Estimated uncompressed size if available
pub uncompressed_size_bytes: Option<u64>,
}
/// Reason why profiling was truncated before exhausting the source
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub enum TruncationReason {
/// Stopped after processing a maximum number of rows
MaxRows(u64),
/// Stopped after consuming a maximum number of bytes
MaxBytes(u64),
/// Stopped due to memory pressure
MemoryPressure,
/// Stopped due to a user-defined stop condition (see #220)
StopCondition(String),
/// The input stream was closed by the producer
StreamClosed,
/// Stopped due to a timeout
Timeout,
}
/// Metadata about the profiling execution — replaces the former `ScanInfo`.
///
/// Designed to work for both batch (file-based) and streaming scenarios.
/// For streams, `source_exhausted` indicates whether all data was consumed,
/// and `truncation_reason` explains why processing stopped early.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ExecutionMetadata {
/// Number of rows actually processed/analyzed
pub rows_processed: usize,
/// Number of bytes consumed from the source (if known)
#[serde(skip_serializing_if = "Option::is_none")]
pub bytes_consumed: Option<u64>,
/// Number of columns detected in the data
pub columns_detected: usize,
/// Total execution time in milliseconds
pub scan_time_ms: u128,
/// Throughput in rows per second (auto-calculated)
#[serde(skip_serializing_if = "Option::is_none")]
pub throughput_rows_sec: Option<f64>,
/// Peak memory usage in megabytes (if tracked)
#[serde(skip_serializing_if = "Option::is_none")]
pub memory_peak_mb: Option<f64>,
/// Number of errors encountered during profiling
pub error_count: usize,
/// Whether the entire source was consumed (false for truncated/partial analysis)
pub source_exhausted: bool,
/// If the source was not exhausted, why processing stopped
#[serde(skip_serializing_if = "Option::is_none")]
pub truncation_reason: Option<TruncationReason>,
/// Whether sampling was applied (i.e., not all rows were analyzed)
pub sampling_applied: bool,
/// Ratio of rows analyzed to total rows (only meaningful when source_exhausted=true)
#[serde(skip_serializing_if = "Option::is_none")]
pub sampling_ratio: Option<f64>,
}
impl ExecutionMetadata {
/// Create new ExecutionMetadata with throughput calculated automatically.
///
/// Defaults: `source_exhausted=true`, `sampling_applied=false`, no truncation.
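///
/// For example, `ExecutionMetadata::new(1_000_000, 12, 2_000)` yields a
/// throughput of `1_000_000 / 2.0 = 500_000.0` rows/sec, while a zero
/// `scan_time_ms` leaves `throughput_rows_sec` as `None`.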
pub fn new(rows_processed: usize, columns_detected: usize, scan_time_ms: u128) -> Self {
let throughput_rows_sec = if scan_time_ms > 0 {
Some(rows_processed as f64 / (scan_time_ms as f64 / 1000.0))
} else {
None
};
Self {
rows_processed,
bytes_consumed: None,
columns_detected,
scan_time_ms,
throughput_rows_sec,
memory_peak_mb: None,
error_count: 0,
source_exhausted: true,
truncation_reason: None,
sampling_applied: false,
sampling_ratio: None,
}
}
/// Set sampling information.
///
/// Note: this does **not** change `source_exhausted`. A file can be fully
/// read yet still sampled (e.g., skip every other row). Call
/// `.with_source_exhausted(false)` separately when the source was not
/// fully consumed.
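///
/// Illustrative sketch: a 10% sample of a stream that was cut off early.
/// ```ignore
/// let meta = ExecutionMetadata::new(100_000, 8, 1_500)
///     .with_sampling(0.1)
///     .with_source_exhausted(false);
/// assert!(meta.sampling_applied && !meta.source_exhausted);
/// ```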
pub fn with_sampling(mut self, ratio: f64) -> Self {
self.sampling_applied = true;
self.sampling_ratio = Some(ratio);
self
}
/// Explicitly set whether the source was fully consumed.
pub fn with_source_exhausted(mut self, exhausted: bool) -> Self {
self.source_exhausted = exhausted;
self
}
/// Mark as truncated (sets `source_exhausted=false`).
pub fn with_truncation(mut self, reason: TruncationReason) -> Self {
self.source_exhausted = false;
self.truncation_reason = Some(reason);
self
}
/// Set the number of bytes consumed from the source.
pub fn with_bytes_consumed(mut self, bytes: u64) -> Self {
self.bytes_consumed = Some(bytes);
self
}
/// Set the error count.
pub fn with_error_count(mut self, count: usize) -> Self {
self.error_count = count;
self
}
/// Set peak memory usage.
pub fn with_memory_peak_mb(mut self, mb: f64) -> Self {
self.memory_peak_mb = Some(mb);
self
}
}
/// Profiling statistics for a single column.
///
/// Includes data type, null counts, unique counts, type-specific statistics
/// (numeric, text, or datetime), and detected patterns (e.g. email, phone).
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ColumnProfile {
/// Column name
pub name: String,
/// Inferred data type
pub data_type: DataType,
/// Number of null/missing values
pub null_count: usize,
/// Total number of values (including nulls)
pub total_count: usize,
/// Number of distinct values (when computed)
pub unique_count: Option<usize>,
/// Type-specific statistics (numeric, text, or datetime)
pub stats: ColumnStats,
/// Detected value patterns (e.g. email, phone, UUID)
pub patterns: Vec<Pattern>,
}
/// Inferred column data type.
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum DataType {
/// Text / string values
String,
/// Whole numbers (i64 range)
Integer,
/// Floating-point numbers
Float,
/// Date or datetime values
Date,
/// Boolean (true / false) values
Boolean,
}
/// Quartile statistics for numeric distributions.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct Quartiles {
/// 25th percentile
pub q1: f64,
/// 50th percentile (median)
pub q2: f64,
/// 75th percentile
pub q3: f64,
/// Interquartile range (Q3 - Q1)
pub iqr: f64,
}
/// A value and its frequency count within a column.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct FrequencyItem {
/// The value as a string
pub value: String,
/// Number of occurrences
pub count: usize,
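/// Percentage of total occurrences (rounded to two decimals on serialization)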
#[serde(serialize_with = "crate::serde_helpers::round_2")]
pub percentage: f64,
}
/// Statistics for numeric (integer or float) columns.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct NumericStats {
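/// Minimum observed value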
#[serde(serialize_with = "crate::serde_helpers::round_2")]
pub min: f64,
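/// Maximum observed value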
#[serde(serialize_with = "crate::serde_helpers::round_2")]
pub max: f64,