add metric
Atikin-NT committed Oct 30, 2024
commit 317986ef2e9fe94c311e7b346b8362dfdf6f54ec
8 changes: 4 additions & 4 deletions models/public/gcn/accuracy-check.yml
@@ -4,7 +4,7 @@ models:
     # module_config:
     launchers:
       - framework: DGL
-        adapter: classification
+        adapter: node_classification
         device: CPU
         model: C:\Users\Atikin\Desktop\Programming\open_model_zoo\tools\model_tools\src\omz_tools\public\gcn\gcn_model.pt
         module: C:\Users\Atikin\Desktop\Programming\open_model_zoo\tools\model_tools\src\omz_tools\public\gcn\GCN.py
@@ -21,6 +21,6 @@ models:
         converter: DGL_converter
         graph_path: C:\Users\Atikin\Desktop\Programming\data_for_dl_benchmark\data.bin
     metrics:
-      - name: accuracy
-        type: accuracy
-        reference: 0.7446
+      - name: node_accuracy_name
+        type: node_accuracy
+        reference: 0.778
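Note: the `adapter:` and `type:` strings in this config resolve to the `__provider__` attributes of the classes added in this commit ('node_classification' and 'node_accuracy'). A minimal sketch of that provider-lookup pattern, with a hypothetical registry and stub class:

# Hypothetical registry mirroring how the config string 'node_classification'
# selects the GraphNodeClassificationAdapter added below.
REGISTRY = {}

def register(cls):
    REGISTRY[cls.__provider__] = cls
    return cls

@register
class NodeClassificationAdapterStub:  # stand-in, not the real adapter class
    __provider__ = 'node_classification'

adapter_cls = REGISTRY['node_classification']
print(adapter_cls.__name__)  # NodeClassificationAdapterStub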
6 changes: 5 additions & 1 deletion tools/accuracy_checker/accuracy_checker/adapters/__init__.py
@@ -146,6 +146,8 @@

 from .palm_detection import PalmDetectionAdapter

+from .graphs import GraphNodeClassificationAdapter
+
 __all__ = [
     'Adapter',
     'AdapterField',
@@ -298,5 +300,7 @@

     'ImageBackgroundMattingAdapter',

-    'PalmDetectionAdapter'
+    'PalmDetectionAdapter',
+
+    'GraphNodeClassificationAdapter'
 ]
84 changes: 84 additions & 0 deletions tools/accuracy_checker/accuracy_checker/adapters/graphs.py
@@ -0,0 +1,84 @@
import numpy as np

from ..adapters import Adapter
from ..config import BoolField, StringField, NumberField
from ..representation import ClassificationPrediction


class GraphNodeClassificationAdapter(Adapter):
    """
    Class for converting the output of a node classification model
    to ClassificationPrediction representation.
    """
    __provider__ = 'node_classification'
    prediction_types = (ClassificationPrediction, )

    @classmethod
    def parameters(cls):
        parameters = super().parameters()
        # declare the options read in configure(); without these entries the
        # config fields used below would be undeclared
        parameters.update({
            'classification_output': StringField(optional=True, description='Target output layer name.'),
            'block': BoolField(optional=True, default=False,
                               description='Process the whole graph output as a single prediction.'),
            'label_as_array': BoolField(optional=True, default=False,
                                        description='Produce ClassificationPrediction with labels as array.'),
            'multi_label_threshold': NumberField(optional=True, value_type=float,
                                                 description='Threshold for multi-label classification.')
        })
        return parameters

    def configure(self):
        self.label_as_array = self.get_value_from_config('label_as_array')
        self.block = self.get_value_from_config('block')
        self.classification_out = self.get_value_from_config('classification_output')
        self.multilabel_thresh = self.get_value_from_config('multi_label_threshold')
        self.output_verified = False

    def select_output_blob(self, outputs):
        self.output_verified = True
        if self.classification_out:
            self.classification_out = self.check_output_name(self.classification_out, outputs)
            return
        super().select_output_blob(outputs)
        self.classification_out = self.output_blob

    def process(self, raw, identifiers, frame_meta):
        """
        Args:
            identifiers: list of input data identifiers
            raw: output of model
            frame_meta: list of meta information about each frame
        Returns:
            list of ClassificationPrediction objects
        """
        if not self.output_verified:
            self.select_output_blob(raw)
        multi_infer = frame_meta[-1].get('multi_infer', False) if frame_meta else False
        raw_prediction = self._extract_predictions(raw, frame_meta)
        prediction = raw_prediction[self.classification_out]  # per-node prediction tensor
        if multi_infer:
            prediction = np.mean(prediction, axis=0)
        if len(np.shape(prediction)) == 1:
            prediction = np.expand_dims(prediction, axis=0)
        prediction = np.reshape(prediction, (prediction.shape[0], -1))

        result = []
        if self.block:
            # the whole graph output is wrapped into a single prediction
            result.append(self.prepare_representation(identifiers[0], prediction))
        else:
            for identifier, output in zip(identifiers, prediction):
                result.append(self.prepare_representation(identifier, output))

        return result

    def prepare_representation(self, identifier, prediction):
        single_prediction = ClassificationPrediction(
            identifier, prediction, self.label_as_array,
            multilabel_threshold=self.multilabel_thresh)
        return single_prediction

    @staticmethod
    def _extract_predictions(outputs_list, meta):
        is_multi_infer = meta[-1].get('multi_infer', False) if meta else False
        if not is_multi_infer:
            return outputs_list[0] if not isinstance(outputs_list, dict) else outputs_list

        # stack per-inference outputs so process() can average over them
        output_map = {}
        for output_key in outputs_list[0].keys():
            output_data = np.asarray([output[output_key] for output in outputs_list])
            output_map[output_key] = output_data

        return output_map
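For reference, a minimal numpy-only sketch of the multi-infer path above: _extract_predictions stacks the per-inference output dicts, and process() then averages over the inference axis. The output key and shapes are made up for illustration.

import numpy as np

# two hypothetical inference passes over the same 4-node, 3-class graph
outputs_list = [
    {'output': np.random.rand(4, 3)},
    {'output': np.random.rand(4, 3)},
]
# _extract_predictions: stack each output key across passes
merged = {key: np.asarray([out[key] for out in outputs_list]) for key in outputs_list[0]}
# process(): average over the inference axis when multi_infer is set
averaged = np.mean(merged['output'], axis=0)
print(averaged.shape)  # (4, 3): one averaged score row per node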
6 changes: 5 additions & 1 deletion tools/accuracy_checker/accuracy_checker/metrics/__init__.py
@@ -131,6 +131,8 @@
 from .clip_score import ClipScore
 from .matches_homography import MatchesHomography

+from .graph import ClassificationGraphAccuracy
+
 __all__ = [
     'Metric',
     'MetricsExecutor',
@@ -266,5 +268,7 @@
     'MeanSquaredErrorWithMask',

     'ClipScore',
-    'MatchesHomography'
+    'MatchesHomography',
+
+    'ClassificationGraphAccuracy'
 ]
102 changes: 102 additions & 0 deletions tools/accuracy_checker/accuracy_checker/metrics/graph.py
@@ -0,0 +1,102 @@
import numpy as np

from ..representation import (
    ClassificationAnnotation,
    ClassificationPrediction,
    TextClassificationAnnotation,
    ArgMaxClassificationPrediction
)

from .classification import ClassificationProfilingSummaryHelper

from ..config import NumberField, BoolField
from .metric import PerImageEvaluationMetric
from ..utils import UnsupportedPackage

try:
    from sklearn.metrics import accuracy_score
except ImportError as import_error:
    accuracy_score = UnsupportedPackage("sklearn.metrics.accuracy_score", import_error.msg)


class ClassificationGraphAccuracy(PerImageEvaluationMetric):
    """
    Class for evaluating the accuracy metric of graph node classification models.
    """

    __provider__ = 'node_accuracy'

    annotation_types = (ClassificationAnnotation, TextClassificationAnnotation)
    prediction_types = (ClassificationPrediction, ArgMaxClassificationPrediction)

    @classmethod
    def parameters(cls):
        parameters = super().parameters()
        parameters.update({
            'top_k': NumberField(
                value_type=int, min_value=1, optional=True, default=1,
                description="The number of classes with the highest probability which will be used "
                            "to decide if the prediction is correct."
            ),
            'match': BoolField(optional=True, default=False),
            'cast_to_int': BoolField(optional=True, default=False)
        })
        return parameters

    def configure(self):
        self.top_k = self.get_value_from_config('top_k')
        self.match = self.get_value_from_config('match')
        self.cast_to_int = self.get_value_from_config('cast_to_int')
        self.summary_helper = None

        if isinstance(accuracy_score, UnsupportedPackage):
            accuracy_score.raise_error(self.__provider__)
        self.accuracy = []
        if self.profiler:
            self.summary_helper = ClassificationProfilingSummaryHelper()

    def set_profiler(self, profiler):
        self.profiler = profiler
        self.summary_helper = ClassificationProfilingSummaryHelper()

    def update(self, annotation, prediction):
        # per-node labels are the argmax over the per-node score rows
        pred_labels = np.argmax(prediction.scores, axis=-1)

        accuracy = accuracy_score(annotation.label, pred_labels)
        self.accuracy.append(accuracy)

        if self.profiler:
            self.summary_helper.submit_data(annotation.label, prediction.top_k(self.top_k), prediction.scores)
            self.profiler.update(
                annotation.identifier, annotation.label, prediction.top_k(self.top_k), self.name, accuracy,
                prediction.scores
            )
        return accuracy

    def evaluate(self, annotations, predictions):
        if self.profiler:
            self.profiler.finish()
            summary = self.summary_helper.get_summary_report()
            self.profiler.write_summary(summary)
        return np.mean(self.accuracy)

    def reset(self):
        # accuracies are accumulated in a plain list, so resetting recreates it
        self.accuracy = []
        if self.profiler:
            self.profiler.reset()
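For reference, a minimal sketch of the metric's update/evaluate cycle with made-up labels and scores: update() computes one accuracy_score per annotated graph, and evaluate() averages the per-graph values.

import numpy as np
from sklearn.metrics import accuracy_score

per_graph_accuracy = []

ground_truth = np.array([0, 1, 2, 1])    # annotation.label: one label per node
scores = np.array([[0.9, 0.05, 0.05],    # prediction.scores: one row per node
                   [0.1, 0.8, 0.1],
                   [0.2, 0.2, 0.6],
                   [0.1, 0.2, 0.7]])
pred_labels = np.argmax(scores, axis=-1)               # -> [0, 1, 2, 2]
per_graph_accuracy.append(accuracy_score(ground_truth, pred_labels))

print(np.mean(per_graph_accuracy))  # 0.75: three of four nodes correct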