9 changes: 8 additions & 1 deletion packages/firebase_ml_vision/CHANGELOG.md
@@ -1,5 +1,12 @@
## 0.4.0

* **Breaking Change** Removal of base detector class `FirebaseVisionDetector`.
* **Breaking Change** Removal of `TextRecognizer.detectInImage()`. Please use `TextRecognizer.processImage()`.
* **Breaking Change** Changed `FaceDetector.detectInImage()` to `FaceDetector.processImage()`. A minimal migration sketch is shown below.
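
A minimal migration sketch for these changes, mirroring the doc examples in this release (the file path is a placeholder; barcode and label detectors keep `detectInImage()`):

```dart
// 0.3.x (removed API):
//   final List<Face> faces = await faceDetector.detectInImage(image);
//   final VisionText text = await textRecognizer.detectInImage(image);

// 0.4.0:
final FirebaseVisionImage image =
    FirebaseVisionImage.fromFilePath('path/to/file');

final FaceDetector faceDetector = FirebaseVision.instance.faceDetector();
final List<Face> faces = await faceDetector.processImage(image);

final TextRecognizer textRecognizer = FirebaseVision.instance.textRecognizer();
final VisionText visionText = await textRecognizer.processImage(image);
```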

## 0.3.0
* **Breaking change**. Migrate from the deprecated original Android Support

* **Breaking Change** Migrate from the deprecated original Android Support
Library to AndroidX. This shouldn't result in any functional changes, but it
requires any Android apps using this plugin to [also
migrate](https://developer.android.com/jetpack/androidx/migrate) if they're
6 changes: 3 additions & 3 deletions packages/firebase_ml_vision/README.md
@@ -1,8 +1,8 @@
# ML Kit for Firebase
# ML Kit Vision for Firebase

[![pub package](https://img.shields.io/pub/v/firebase_ml_vision.svg)](https://pub.dartlang.org/packages/firebase_ml_vision)

A Flutter plugin to use the [ML Kit for Firebase API](https://firebase.google.com/docs/ml-kit/).
A Flutter plugin to use the [ML Kit Vision for Firebase API](https://firebase.google.com/docs/ml-kit/).

For Flutter plugins for other Firebase products, see [FlutterFire.md](https://github.com/flutter/plugins/blob/master/FlutterFire.md).

@@ -153,4 +153,4 @@ for (TextBlock block in visionText.blocks) {

## Getting Started

See the `example` directory for a complete sample app using ML Kit for Firebase.
See the `example` directory for a complete sample app using ML Kit Vision for Firebase.
@@ -44,7 +44,7 @@ public void onMethodCall(MethodCall call, Result result) {
case "BarcodeDetector#detectInImage":
BarcodeDetector.instance.handleDetection(image, options, result);
break;
case "FaceDetector#detectInImage":
case "FaceDetector#processImage":
FaceDetector.instance.handleDetection(image, options, result);
break;
case "LabelDetector#detectInImage":
23 changes: 14 additions & 9 deletions packages/firebase_ml_vision/example/lib/main.dart
@@ -70,30 +70,35 @@ class _MyHomePageState extends State<_MyHomePage> {
final FirebaseVisionImage visionImage =
FirebaseVisionImage.fromFile(imageFile);

FirebaseVisionDetector detector;
dynamic results;
switch (_currentDetector) {
case Detector.barcode:
detector = FirebaseVision.instance.barcodeDetector();
final BarcodeDetector detector =
FirebaseVision.instance.barcodeDetector();
results = await detector.detectInImage(visionImage);
break;
case Detector.face:
detector = FirebaseVision.instance.faceDetector();
final FaceDetector detector = FirebaseVision.instance.faceDetector();
results = await detector.processImage(visionImage);
break;
case Detector.label:
detector = FirebaseVision.instance.labelDetector();
final LabelDetector detector = FirebaseVision.instance.labelDetector();
results = await detector.detectInImage(visionImage);
break;
case Detector.cloudLabel:
detector = FirebaseVision.instance.cloudLabelDetector();
final CloudLabelDetector detector =
FirebaseVision.instance.cloudLabelDetector();
results = await detector.detectInImage(visionImage);
break;
case Detector.text:
detector = FirebaseVision.instance.textRecognizer();
final TextRecognizer recognizer =
FirebaseVision.instance.textRecognizer();
results = await recognizer.processImage(visionImage);
break;
default:
return;
}

final dynamic results =
await detector.detectInImage(visionImage) ?? <dynamic>[];

setState(() {
_scanResults = results;
});
@@ -40,7 +40,7 @@ - (void)handleMethodCall:(FlutterMethodCall *)call result:(FlutterResult)result
NSDictionary *options = call.arguments[@"options"];
if ([@"BarcodeDetector#detectInImage" isEqualToString:call.method]) {
[BarcodeDetector handleDetection:image options:options result:result];
} else if ([@"FaceDetector#detectInImage" isEqualToString:call.method]) {
} else if ([@"FaceDetector#processImage" isEqualToString:call.method]) {
[FaceDetector handleDetection:image options:options result:result];
} else if ([@"LabelDetector#detectInImage" isEqualToString:call.method]) {
[LabelDetector handleDetection:image options:options result:result];
16 changes: 10 additions & 6 deletions packages/firebase_ml_vision/lib/src/barcode_detector.dart
@@ -170,21 +170,25 @@ class BarcodeFormat {

/// Detector for performing barcode scanning on an input image.
///
/// A barcode detector is created via barcodeDetector() in [FirebaseVision]:
/// A barcode detector is created via
/// `barcodeDetector([BarcodeDetectorOptions options])` in [FirebaseVision]:
///
/// ```dart
/// BarcodeDetector barcodeDetector = FirebaseVision.instance.barcodeDetector();
/// final FirebaseVisionImage image =
/// FirebaseVisionImage.fromFilePath('path/to/file');
///
/// final BarcodeDetector barcodeDetector =
/// FirebaseVision.instance.barcodeDetector();
///
/// final List<Barcode> barcodes = await barcodeDetector.detectInImage(image);
/// ```
class BarcodeDetector extends FirebaseVisionDetector {
class BarcodeDetector {
BarcodeDetector._(this.options) : assert(options != null);

/// The options for configuring this detector.
final BarcodeDetectorOptions options;

/// Detects barcodes in the input image.
///
/// The barcode scanning is performed asynchronously.
@override
Future<List<Barcode>> detectInImage(FirebaseVisionImage visionImage) async {
// TODO(amirh): remove this once the invokeMethod update makes it to stable Flutter.
// https://github.com/flutter/flutter/issues/26431
18 changes: 11 additions & 7 deletions packages/firebase_ml_vision/lib/src/face_detector.dart
@@ -26,26 +26,30 @@ enum FaceLandmarkType {

/// Detector for detecting faces in an input image.
///
/// A face detector is created via faceDetector(FaceDetectorOptions options)
/// in [FirebaseVision]:
/// A face detector is created via
/// `faceDetector([FaceDetectorOptions options])` in [FirebaseVision]:
///
/// ```dart
/// FaceDetector faceDetector = FirebaseVision.instance.faceDetector(options);
/// final FirebaseVisionImage image =
/// FirebaseVisionImage.fromFilePath('path/to/file');
///
/// final FaceDetector faceDetector = FirebaseVision.instance.faceDetector();
///
/// final List<Face> faces = await faceDetector.processImage(image);
/// ```
class FaceDetector extends FirebaseVisionDetector {
class FaceDetector {
FaceDetector._(this.options) : assert(options != null);

/// The options for the face detector.
final FaceDetectorOptions options;

/// Detects faces in the input image.
@override
Future<List<Face>> detectInImage(FirebaseVisionImage visionImage) async {
Future<List<Face>> processImage(FirebaseVisionImage visionImage) async {
// TODO(amirh): remove this once the invokeMethod update makes it to stable Flutter.
// https://github.com/flutter/flutter/issues/26431
// ignore: strong_mode_implicit_dynamic_method
final List<dynamic> reply = await FirebaseVision.channel.invokeMethod(
'FaceDetector#detectInImage',
'FaceDetector#processImage',
<String, dynamic>{
'options': <String, dynamic>{
'enableClassification': options.enableClassification,
6 changes: 0 additions & 6 deletions packages/firebase_ml_vision/lib/src/firebase_vision.dart
@@ -229,12 +229,6 @@ class FirebaseVisionImageMetadata {
};
}

/// Abstract class for detectors in [FirebaseVision] API.
abstract class FirebaseVisionDetector {
/// Uses machine learning model to detect objects of interest in an image.
Future<dynamic> detectInImage(FirebaseVisionImage visionImage);
}

String _enumToString(dynamic enumValue) {
final String enumString = enumValue.toString();
return enumString.substring(enumString.indexOf('.') + 1);
34 changes: 20 additions & 14 deletions packages/firebase_ml_vision/lib/src/label_detector.dart
@@ -12,13 +12,19 @@ part of firebase_ml_vision;
/// this information, you can perform tasks such as automatic metadata
/// generation and content moderation.
///
/// A label detector is created via labelDetector(LabelDetectorOptions options)
/// in [FirebaseVision]:
/// A label detector is created via
/// `labelDetector([LabelDetectorOptions options])` in [FirebaseVision]:
///
/// ```dart
/// LabelDetector labelDetector = FirebaseVision.instance.labelDetector(options);
/// final FirebaseVisionImage image =
/// FirebaseVisionImage.fromFilePath('path/to/file');
///
/// final LabelDetector labelDetector =
/// FirebaseVision.instance.labelDetector(options);
///
/// final List<Label> labels = await labelDetector.detectInImage(image);
/// ```
class LabelDetector extends FirebaseVisionDetector {
class LabelDetector {
LabelDetector._(this.options) : assert(options != null);

/// The options for the detector.
@@ -27,9 +33,6 @@ class LabelDetector extends FirebaseVisionDetector {
final LabelDetectorOptions options;

/// Detects entities in the input image.
///
/// Performed asynchronously.
@override
Future<List<Label>> detectInImage(FirebaseVisionImage visionImage) async {
// TODO(amirh): remove this once the invokeMethod update makes it to stable Flutter.
// https://github.com/flutter/flutter/issues/26431
@@ -62,22 +65,25 @@ class LabelDetector extends FirebaseVisionDetector {
/// this information, you can perform tasks such as automatic metadata
/// generation and content moderation.
///
/// A cloud label detector is created via cloudLabelDetector(CloudDetectorOptions options)
/// in [FirebaseVision]:
/// A cloud label detector is created via
/// `cloudLabelDetector([CloudDetectorOptions options])` in [FirebaseVision]:
///
/// ```dart
/// CloudLabelDetector cloudLabelDetector = FirebaseVision.instance.cloudLabelDetector(options);
/// final FirebaseVisionImage image =
/// FirebaseVisionImage.fromFilePath('path/to/file');
///
/// final CloudLabelDetector cloudLabelDetector =
/// FirebaseVision.instance.cloudLabelDetector();
///
/// final List<Label> labels = await cloudLabelDetector.detectInImage(image);
/// ```
class CloudLabelDetector extends FirebaseVisionDetector {
class CloudLabelDetector {
CloudLabelDetector._(this.options) : assert(options != null);

/// Options used to configure this cloud detector.
final CloudDetectorOptions options;

/// Detects entities in the input image.
///
/// Performed asynchronously.
@override
Future<List<Label>> detectInImage(FirebaseVisionImage visionImage) async {
// TODO(amirh): remove this once the invokeMethod update makes it to stable Flutter.
// https://github.com/flutter/flutter/issues/26431
22 changes: 9 additions & 13 deletions packages/firebase_ml_vision/lib/src/text_recognizer.dart
@@ -9,14 +9,19 @@ part of firebase_ml_vision;
/// A text recognizer is created via `textRecognizer()` in [FirebaseVision]:
///
/// ```dart
/// TextRecognizer textRecognizer = FirebaseVision.instance.textRecognizer();
/// final FirebaseVisionImage image =
/// FirebaseVisionImage.fromFilePath('path/to/file');
///
/// final TextRecognizer textRecognizer =
/// FirebaseVision.instance.textRecognizer();
///
/// final VisionText recognizedText =
/// await textRecognizer.processImage(image);
/// ```
class TextRecognizer implements FirebaseVisionDetector {
class TextRecognizer {
TextRecognizer._();

/// Detects [VisionText] from a [FirebaseVisionImage].
///
/// The OCR is performed asynchronously.
Future<VisionText> processImage(FirebaseVisionImage visionImage) async {
final Map<dynamic, dynamic> reply =
// TODO(amirh): remove this once the invokeMethod update makes it to stable Flutter.
Expand All @@ -31,15 +36,6 @@ class TextRecognizer implements FirebaseVisionDetector {

return VisionText._(reply);
}

/// Detects [VisionText] from a [FirebaseVisionImage].
///
/// The OCR is performed asynchronously.
@Deprecated('Please use `processImage`')
@override
Future<VisionText> detectInImage(FirebaseVisionImage visionImage) async {
return processImage(visionImage);
}
}

/// Recognized text in an image.
5 changes: 2 additions & 3 deletions packages/firebase_ml_vision/pubspec.yaml
@@ -1,9 +1,8 @@
name: firebase_ml_vision
description: Flutter plugin for Google ML Vision for Firebase, an SDK that brings Google's machine
learning expertise to Android and iOS apps in a powerful yet easy-to-use package.
description: Flutter plugin for Firebase machine learning vision services.
author: Flutter Team <[email protected]>
homepage: https://github.com/flutter/plugins/tree/master/packages/firebase_ml_vision
version: 0.3.0
version: 0.4.0

dependencies:
flutter:
16 changes: 8 additions & 8 deletions packages/firebase_ml_vision/test/firebase_ml_vision_test.dart
@@ -23,7 +23,7 @@ void main() {
switch (methodCall.method) {
case 'BarcodeDetector#detectInImage':
return returnValue;
case 'FaceDetector#detectInImage':
case 'FaceDetector#processImage':
return returnValue;
case 'LabelDetector#detectInImage':
return returnValue;
@@ -528,7 +528,7 @@ void main() {
];
});

test('detectInImage', () async {
test('processImage', () async {
returnValue = testFaces;

final FaceDetector detector = FirebaseVision.instance.faceDetector(
@@ -545,11 +545,11 @@
'empty',
);

final List<Face> faces = await detector.detectInImage(image);
final List<Face> faces = await detector.processImage(image);

expect(log, <Matcher>[
isMethodCall(
'FaceDetector#detectInImage',
'FaceDetector#processImage',
arguments: <String, dynamic>{
'type': 'file',
'path': 'empty',
@@ -595,7 +595,7 @@
expect(p(FaceLandmarkType.rightMouth), const Point<double>(18.1, 19.1));
});

test('detectInImage with null landmark', () async {
test('processImage with null landmark', () async {
testFaces[0]['landmarks']['bottomMouth'] = null;
returnValue = testFaces;

@@ -606,12 +606,12 @@
'empty',
);

final List<Face> faces = await detector.detectInImage(image);
final List<Face> faces = await detector.processImage(image);

expect(faces[0].getLandmark(FaceLandmarkType.bottomMouth), isNull);
});

test('detectInImage no faces', () async {
test('processImage no faces', () async {
returnValue = <dynamic>[];

final FaceDetector detector = FirebaseVision.instance.faceDetector(
@@ -621,7 +621,7 @@
'empty',
);

final List<Face> faces = await detector.detectInImage(image);
final List<Face> faces = await detector.processImage(image);
expect(faces, isEmpty);
});
});