Flutter人脸识别插件flutter_face_api_beta的使用
Flutter人脸识别插件flutter_face_api_beta的使用
概述
flutter_face_api_beta
是一个用于在Flutter应用中进行人脸识别、匹配和活体检测的插件。该插件基于Regula Face SDK,提供了强大的人脸识别功能。
文档和支持
- 文档: Regula Face SDK 文档
- 支持: 如果需要任何帮助或想报告bug / 提供建议,请联系 支持中心
示例代码
以下是一个完整的示例demo,展示如何使用 flutter_face_api_beta
插件进行人脸识别和活体检测:
import 'dart:io';
import 'package:flutter/material.dart';
import 'package:flutter/services.dart';
import 'dart:async';
import 'package:flutter_face_api_beta/flutter_face_api.dart';
import 'package:image_picker/image_picker.dart';
// Entry point: `new` is redundant in modern Dart and discouraged by the linter.
void main() => runApp(MaterialApp(home: MyApp()));
/// State for the face-matching demo: manages two portraits, runs SDK
/// initialization, liveness detection, and face matching.
class _MyAppState extends State<MyApp> {
  // Shared Regula Face SDK entry point.
  var faceSdk = FaceSDK.instance;

  // Backing fields for the pieces of UI state.
  var _status = "nil";
  var _similarityStatus = "nil";
  var _livenessStatus = "nil";
  var _uiImage1 = Image.asset('assets/images/portrait.png');
  var _uiImage2 = Image.asset('assets/images/portrait.png');

  // Setters that both update the field and schedule a rebuild.
  set status(String val) => setState(() => _status = val);
  set similarityStatus(String val) => setState(() => _similarityStatus = val);
  set livenessStatus(String val) => setState(() => _livenessStatus = val);
  set uiImage1(Image val) => setState(() => _uiImage1 = val);
  set uiImage2(Image val) => setState(() => _uiImage2 = val);

  // Images handed to the SDK for matching; null until the user selects them.
  MatchFacesImage? mfImage1;
  MatchFacesImage? mfImage2;

  /// Initializes the SDK after the widget is inserted into the tree.
  ///
  /// Fix: the original called `super.initState()` here, but `init` is a
  /// plain helper, not an override — that call duplicated the one already
  /// made in [initState] and does not belong in this method.
  void init() async {
    if (!await initialize()) return;
    status = "Ready";
  }

  /// Runs the SDK liveness flow and, on success, stores the captured live
  /// frame as image #1 and shows the liveness verdict.
  Future<void> startLiveness() async {
    var result = await faceSdk.startLiveness(
      config: LivenessConfig(skipStep: [LivenessSkipStep.ONBOARDING_STEP]),
      notificationCompletion: (notification) {
        print(notification.status);
      },
    );
    if (result.image == null) return;
    setImage(result.image!, ImageType.LIVE, 1);
    livenessStatus = result.liveness.name.toLowerCase();
  }

  /// Compares the two selected images and displays the best similarity score.
  Future<void> matchFaces() async {
    if (mfImage1 == null || mfImage2 == null) {
      status = "Both images required!";
      return;
    }
    status = "Processing...";
    var request = MatchFacesRequest([mfImage1!, mfImage2!]);
    var response = await faceSdk.matchFaces(request);
    // Partition pairs at a 0.75 similarity threshold.
    var split = await faceSdk.splitComparedFaces(response.results, 0.75);
    var match = split.matchedFaces;
    similarityStatus = "failed";
    if (match.isNotEmpty) {
      similarityStatus = "${(match[0].similarity * 100).toStringAsFixed(2)}%";
    }
    status = "Ready";
  }

  /// Resets both images and all status labels to their initial values.
  void clearResults() {
    status = "Ready";
    similarityStatus = "nil";
    livenessStatus = "nil";
    uiImage2 = Image.asset('assets/images/portrait.png');
    uiImage1 = Image.asset('assets/images/portrait.png');
    mfImage1 = null;
    mfImage2 = null;
  }

  /// Initializes the Face SDK, optionally with a bundled license file.
  ///
  /// Returns true on success; on failure the SDK error message is surfaced
  /// in the app bar and logged.
  Future<bool> initialize() async {
    status = "Initializing...";
    var license = await loadAssetIfExists("assets/regula.license");
    InitConfig? config; // nullable fields/locals default to null — no explicit init needed
    if (license != null) config = InitConfig(license);
    var (success, error) = await faceSdk.initialize(config: config);
    if (!success) {
      status = error!.message;
      print("${error.code}: ${error.message}");
    }
    return success;
  }

  /// Loads an asset, returning null instead of throwing when it is absent.
  Future<ByteData?> loadAssetIfExists(String path) async {
    try {
      return await rootBundle.load(path);
    } catch (_) {
      return null;
    }
  }

  /// Stores [bytes] as image [number] (1 or 2) and refreshes the preview.
  /// Selecting a new image invalidates the previous similarity result.
  void setImage(Uint8List bytes, ImageType type, int number) {
    similarityStatus = "nil";
    var mfImage = MatchFacesImage(bytes, type);
    if (number == 1) {
      mfImage1 = mfImage;
      uiImage1 = Image.memory(bytes);
      livenessStatus = "nil";
    }
    if (number == 2) {
      mfImage2 = mfImage;
      uiImage2 = Image.memory(bytes);
    }
  }

  /// Dialog action that picks image [number] from the photo gallery.
  Widget useGallery(int number) {
    return textButton("Use gallery", () async {
      Navigator.pop(context);
      var image = await ImagePicker().pickImage(source: ImageSource.gallery);
      if (image != null) {
        setImage(File(image.path).readAsBytesSync(), ImageType.PRINTED, number);
      }
    });
  }

  /// Dialog action that captures image [number] with the SDK camera screen.
  Widget useCamera(int number) {
    return textButton("Use camera", () async {
      Navigator.pop(context);
      var response = await faceSdk.startFaceCapture();
      var image = response.image;
      if (image != null) setImage(image.image, image.imageType, number);
    });
  }

  /// 150x150 tappable preview of [image].
  Widget image(Image image, Function() onTap) => GestureDetector(
        onTap: onTap,
        child: Image(height: 150, width: 150, image: image.image),
      );

  /// Fixed-width action button with the demo's shared styling.
  Widget button(String text, Function() onPressed) {
    return Container(
      child: textButton(text, onPressed,
          style: ButtonStyle(
            backgroundColor: WidgetStateProperty.all<Color>(Colors.black12),
          )),
      width: 250,
    );
  }

  /// 18pt status label.
  Widget text(String text) => Text(text, style: TextStyle(fontSize: 18));

  /// Plain text button shared by the dialogs and the action buttons.
  Widget textButton(String text, Function() onPressed, {ButtonStyle? style}) =>
      TextButton(
        child: Text(text),
        onPressed: onPressed,
        style: style,
      );

  /// Asks whether image [number] should come from the gallery or the camera.
  setImageDialog(BuildContext context, int number) => showDialog(
        context: context,
        builder: (BuildContext context) => AlertDialog(
          title: Text("Select option"),
          actions: [useGallery(number), useCamera(number)],
        ),
      );

  @override
  Widget build(BuildContext bc) {
    return Scaffold(
      appBar: AppBar(title: Center(child: Text(_status))),
      body: Container(
        margin: EdgeInsets.fromLTRB(0, 0, 0, MediaQuery.of(bc).size.height / 8),
        width: double.infinity,
        child: Column(
          mainAxisAlignment: MainAxisAlignment.center,
          children: <Widget>[
            image(_uiImage1, () => setImageDialog(bc, 1)),
            image(_uiImage2, () => setImageDialog(bc, 2)),
            Container(margin: EdgeInsets.fromLTRB(0, 0, 0, 15)),
            button("Match", () => matchFaces()),
            button("Liveness", () => startLiveness()),
            button("Clear", () => clearResults()),
            Container(margin: EdgeInsets.fromLTRB(0, 15, 0, 0)),
            Row(
              mainAxisAlignment: MainAxisAlignment.center,
              children: [
                text("Similarity: $_similarityStatus"),
                Container(margin: EdgeInsets.fromLTRB(20, 0, 0, 0)),
                text("Liveness: $_livenessStatus")
              ],
            )
          ],
        ),
      ),
    );
  }

  @override
  void initState() {
    super.initState();
    init();
  }
}
/// Root widget of the demo; all interesting state lives in [_MyAppState].
class MyApp extends StatefulWidget {
  @override
  State<MyApp> createState() {
    return _MyAppState();
  }
}
主要功能
- 初始化: 通过加载许可证文件(如果存在)来初始化SDK。
- 图像选择: 支持从相册或相机获取人脸图像。
- 活体检测: 使用 startLiveness 方法进行活体检测。
- 人脸匹配: 使用 matchFaces 方法进行两个人脸图像的相似度匹配。
- 结果显示: 在UI上显示匹配结果和活体检测状态。
希望这个示例能帮助您快速上手 flutter_face_api_beta
插件,并将其集成到您的Flutter项目中。如果有任何问题,请参考官方文档或联系技术支持。
更多关于Flutter人脸识别插件flutter_face_api_beta的使用的实战教程也可以访问 https://www.itying.com/category-92-b0.html
更多关于Flutter人脸识别插件flutter_face_api_beta的使用的实战系列教程也可以访问 https://www.itying.com/category-92-b0.html
当然,下面是一个关于如何使用Flutter的人脸识别插件flutter_face_api_beta
的代码示例。这个示例将展示如何进行基本的人脸检测操作。请注意,flutter_face_api_beta
插件可能还在开发中,API和功能可能会有所变化,因此请确保查阅最新的官方文档和插件版本。
首先,确保你已经在pubspec.yaml
文件中添加了flutter_face_api_beta
依赖:
dependencies:
flutter:
sdk: flutter
flutter_face_api_beta: ^最新版本号 # 请替换为实际的最新版本号
然后运行flutter pub get
来获取依赖。
接下来,是一个简单的Flutter应用示例,展示如何使用flutter_face_api_beta
进行人脸检测:
import 'dart:io';
import 'dart:typed_data';
import 'dart:ui' as ui;

import 'package:flutter/material.dart';
import 'package:flutter_face_api_beta/flutter_face_api_beta.dart';
import 'package:image_picker/image_picker.dart';
// Entry point for the face-detection demo app.
void main() => runApp(MyApp());
/// Application shell: wraps the detection screen in a [MaterialApp].
class MyApp extends StatelessWidget {
  @override
  Widget build(BuildContext context) {
    final home = FaceDetectionScreen();
    return MaterialApp(home: home);
  }
}
/// Screen that lets the user capture a photo and view detected faces.
class FaceDetectionScreen extends StatefulWidget {
  @override
  State<FaceDetectionScreen> createState() {
    return _FaceDetectionScreenState();
  }
}
/// State for [FaceDetectionScreen]: picks a camera image, runs face
/// detection on it, and renders the result via [FacePainter].
class _FaceDetectionScreenState extends State<FaceDetectionScreen> {
  // Faces found in the most recently analyzed image.
  List<FaceDetectionResult> _faceResults = [];

  // Raw bytes of the picked image; null until the user picks one.
  Uint8List? _imageBytes;

  /// Captures a photo with the camera, then kicks off detection.
  Future<void> _pickImage() async {
    final picked = await ImagePicker().pickImage(source: ImageSource.camera);
    if (picked == null) return;
    final bytes = await File(picked.path).readAsBytes();
    _imageBytes = bytes;
    _detectFaces(bytes);
  }

  /// Runs the plugin's face detector on [imageBytes] and stores the results.
  /// Detection failures are logged and leave the previous results in place.
  Future<void> _detectFaces(Uint8List imageBytes) async {
    final detector = FaceDetector();
    try {
      final results = await detector.detectFaces(imageBytes);
      setState(() => _faceResults = results);
    } catch (e) {
      print("Error detecting faces: $e");
    }
  }

  @override
  Widget build(BuildContext context) {
    // Copy to a local so the null check promotes the value.
    final bytes = _imageBytes;
    return Scaffold(
      appBar: AppBar(
        title: Text('Face Detection with Flutter'),
      ),
      body: Center(
        child: Column(
          mainAxisAlignment: MainAxisAlignment.center,
          children: [
            ElevatedButton(
              onPressed: _pickImage,
              child: Text('Pick Image from Camera'),
            ),
            SizedBox(height: 20),
            if (bytes != null)
              Container(
                width: 300,
                height: 300,
                child: CustomPaint(
                  painter: FacePainter(bytes, _faceResults),
                ),
              ),
          ],
        ),
      ),
    );
  }
}
/// Paints the picked image scaled into the canvas, with a red stroked
/// rectangle over each detected face.
class FacePainter extends CustomPainter {
  /// Raw encoded bytes of the image to draw behind the face boxes.
  final Uint8List imageBytes;

  /// Faces detected in [imageBytes]; a bounding box is drawn for each.
  final List<FaceDetectionResult> faceResults;

  // Decoded frame; filled in asynchronously after construction.
  ui.Image? _image;

  FacePainter(this.imageBytes, this.faceResults) {
    // Fix: `dart:ui` has no synchronous `decodeImage` (that API belongs to
    // package:image), and `drawImageRect` takes a `ui.Image`, not a paint
    // configuration. Decode asynchronously via `decodeImageFromList`; until
    // the callback fires only the face rectangles are painted
    // (shouldRepaint == true lets a later frame pick up the decoded image).
    ui.decodeImageFromList(imageBytes, (img) => _image = img);
  }

  @override
  void paint(Canvas canvas, Size size) {
    final Paint paint = Paint()
      ..color = Colors.red
      ..style = PaintingStyle.stroke
      ..strokeWidth = 2.0;

    final image = _image;
    if (image != null) {
      // Scale the full source image into the painter's canvas area.
      // NOTE(review): this stretches the image; aspect-ratio handling and
      // scaling of the face coordinates are left out, as the article notes.
      canvas.drawImageRect(
        image,
        Rect.fromLTWH(0, 0, image.width.toDouble(), image.height.toDouble()),
        Rect.fromLTWH(0, 0, size.width, size.height),
        Paint(),
      );
    }

    // Outline each detected face with the red stroke paint.
    for (var face in faceResults) {
      final Rect faceRect = Rect.fromLTWH(
        face.boundingBox.left.toDouble(),
        face.boundingBox.top.toDouble(),
        face.boundingBox.width.toDouble(),
        face.boundingBox.height.toDouble(),
      );
      canvas.drawRect(faceRect, paint);
    }
  }

  @override
  bool shouldRepaint(covariant CustomPainter oldDelegate) {
    // Always repaint: cheap for a demo, and required so the asynchronously
    // decoded image eventually appears.
    return true;
  }
}
代码解释:
- 依赖管理: 在 pubspec.yaml 文件中添加 flutter_face_api_beta 依赖。
- UI结构: 使用 MaterialApp 和 Scaffold 构建基本的UI结构。
- 图像选择: 使用 ImagePicker 插件从相机选择图像。
- 人脸检测: 使用 flutter_face_api_beta 插件的 FaceDetector 类进行人脸检测。
- 绘制结果: 使用 CustomPainter 在图像上绘制检测到的人脸边界框。
注意事项:
- 确保在Android和iOS项目中配置了相应的相机权限。
- 由于 flutter_face_api_beta 可能还在开发中,API和功能可能会有所变化,因此请查阅最新的官方文档。
- 本示例中未处理图像缩放和旋转,实际应用中可能需要考虑这些因素以确保人脸检测的准确性。
希望这个示例对你有所帮助!