Using the flutter_face_api Face Recognition Plugin in Flutter

Published 1 week ago · by sinazl · in Flutter


Regula Face SDK plugin for Flutter


Face SDK is a framework for face matching, face recognition, and liveness detection. It provides powerful capabilities that help developers add efficient face recognition to their applications.

Documentation

You can find more documentation here: Regula Face SDK Documentation

Support

If you need any help, want to report a bug, or would like to suggest an improvement, don't hesitate to contact our support team: Contact Support

Example Code and Complete Demo

Below is a complete example demonstrating how to use the flutter_face_api plugin for face matching and liveness detection.

import 'dart:io';
import 'package:flutter/material.dart';
import 'package:flutter/services.dart';
import 'package:flutter_face_api/flutter_face_api.dart';
import 'package:image_picker/image_picker.dart';

void main() => runApp(MaterialApp(home: MyApp()));

class _MyAppState extends State<MyApp> {
  var faceSdk = FaceSDK.instance;

  // Status messages
  String _status = "nil";
  String _similarityStatus = "nil";
  String _livenessStatus = "nil";

  // Displayed images
  Image _uiImage1 = Image.asset('assets/images/portrait.png');
  Image _uiImage2 = Image.asset('assets/images/portrait.png');

  // Setters that update the widget state
  set status(String val) => setState(() => _status = val);
  set similarityStatus(String val) => setState(() => _similarityStatus = val);
  set livenessStatus(String val) => setState(() => _livenessStatus = val);
  set uiImage1(Image val) => setState(() => _uiImage1 = val);
  set uiImage2(Image val) => setState(() => _uiImage2 = val);

  MatchFacesImage? mfImage1;
  MatchFacesImage? mfImage2;

  @override
  void initState() {
    super.initState();
    SystemChrome.setPreferredOrientations([DeviceOrientation.portraitUp]);
    initialize();
  }

  // Initialize the SDK
  Future<bool> initialize() async {
    status = "Initializing...";
    var license = await loadAssetIfExists("assets/regula.license");
    InitConfig? config = null;
    if (license != null) config = InitConfig(license);
    var (success, error) = await faceSdk.initialize(config: config);
    if (!success) {
      status = error!.message;
      print("${error.code}: ${error.message}");
    } else {
      status = "Ready";
    }
    return success;
  }

  // Load an asset file if it exists
  Future<ByteData?> loadAssetIfExists(String path) async {
    try {
      return await rootBundle.load(path);
    } catch (_) {
      return null;
    }
  }

  // Start liveness detection
  startLiveness() async {
    var result = await faceSdk.startLiveness(
      config: LivenessConfig(skipStep: [LivenessSkipStep.ONBOARDING_STEP]),
      notificationCompletion: (notification) {
        print(notification.status);
      },
    );
    if (result.image == null) return;
    setImage(result.image!, ImageType.LIVE, 1);
    livenessStatus = result.liveness.name.toLowerCase();
  }

  // Match the two faces
  matchFaces() async {
    if (mfImage1 == null || mfImage2 == null) {
      status = "Both images required!";
      return;
    }
    status = "Processing...";
    var request = MatchFacesRequest([mfImage1!, mfImage2!]);
    var response = await faceSdk.matchFaces(request);
    var split = await faceSdk.splitComparedFaces(response.results, 0.75);
    var match = split.matchedFaces;
    similarityStatus = "failed";
    if (match.isNotEmpty) {
      similarityStatus = (match[0].similarity * 100).toStringAsFixed(2) + "%";
    }
    status = "Ready";
  }

  // Clear results
  clearResults() {
    status = "Ready";
    similarityStatus = "nil";
    livenessStatus = "nil";
    uiImage2 = Image.asset('assets/images/portrait.png');
    uiImage1 = Image.asset('assets/images/portrait.png');
    mfImage1 = null;
    mfImage2 = null;
  }

  // Set an image into slot 1 or 2
  setImage(Uint8List bytes, ImageType type, int number) {
    similarityStatus = "nil";
    var mfImage = MatchFacesImage(bytes, type);
    if (number == 1) {
      mfImage1 = mfImage;
      uiImage1 = Image.memory(bytes);
      livenessStatus = "nil";
    }
    if (number == 2) {
      mfImage2 = mfImage;
      uiImage2 = Image.memory(bytes);
    }
  }

  // Pick an image from the gallery
  Widget useGallery(int number) {
    return TextButton(
      child: Text("Use gallery"),
      onPressed: () async {
        Navigator.pop(context);
        var image = await ImagePicker().pickImage(source: ImageSource.gallery);
        if (image != null) {
          setImage(File(image.path).readAsBytesSync(), ImageType.PRINTED, number);
        }
      },
    );
  }

  // Capture a photo with the camera
  Widget useCamera(int number) {
    return TextButton(
      child: Text("Use camera"),
      onPressed: () async {
        Navigator.pop(context);
        var response = await faceSdk.startFaceCapture();
        var image = response.image;
        if (image != null) setImage(image.image, image.imageType, number);
      },
    );
  }

  // Display an image
  Widget image(Image image, Function() onTap) => GestureDetector(
        onTap: onTap,
        child: Image(height: 150, width: 150, image: image.image),
      );

  // Build a button
  Widget button(String text, Function() onPressed) {
    return Container(
      child: TextButton(
        child: Text(text),
        onPressed: onPressed,
        style: ButtonStyle(
          backgroundColor: MaterialStateProperty.all<Color>(Colors.black12),
        ),
      ),
      width: 250,
    );
  }

  // Build a text widget
  Widget text(String text) => Text(text, style: TextStyle(fontSize: 18));

  // Show a dialog to choose the image source
  setImageDialog(BuildContext context, int number) => showDialog(
        context: context,
        builder: (BuildContext context) => AlertDialog(
          title: Text("Select option"),
          actions: [useGallery(number), useCamera(number)],
        ),
      );

  @override
  Widget build(BuildContext bc) {
    return Scaffold(
      appBar: AppBar(title: Center(child: Text(_status))),
      body: Container(
        margin: EdgeInsets.fromLTRB(0, 0, 0, MediaQuery.of(bc).size.height / 8),
        width: double.infinity,
        child: Column(
          mainAxisAlignment: MainAxisAlignment.center,
          children: <Widget>[
            image(_uiImage1, () => setImageDialog(bc, 1)),
            image(_uiImage2, () => setImageDialog(bc, 2)),
            Container(margin: EdgeInsets.fromLTRB(0, 0, 0, 15)),
            button("Match", () => matchFaces()),
            button("Liveness", () => startLiveness()),
            button("Clear", () => clearResults()),
            Container(margin: EdgeInsets.fromLTRB(0, 15, 0, 0)),
            Row(
              mainAxisAlignment: MainAxisAlignment.center,
              children: [
                text("Similarity: " + _similarityStatus),
                Container(margin: EdgeInsets.fromLTRB(20, 0, 0, 0)),
                text("Liveness: " + _livenessStatus)
              ],
            )
          ],
        ),
      ),
    );
  }
}

class MyApp extends StatefulWidget {
  @override
  _MyAppState createState() => _MyAppState();
}

Key Points

  • Initialization: the Face SDK is initialized via the initialize method; if a license file is present, it is loaded to enable offline matching.
  • Liveness detection: the startLiveness method launches the liveness check flow.
  • Face matching: the matchFaces method compares two face images, and the similarity is displayed as a percentage.
  • Image selection: the user can take an image from the gallery or the camera, which is then processed and matched.
  • UI design: a simple interface is built with Flutter widgets such as Scaffold and AppBar, with two areas for displaying images and three buttons (Match, Liveness, Clear).

This example shows how to integrate the flutter_face_api plugin into a Flutter app to implement basic face recognition. You can extend and refine it to fit your actual requirements; the pubspec.yaml snippet below lists the asset declarations the example relies on.
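To run the listing as-is, the placeholder portrait and the (optional) regula.license file it loads must be declared as Flutter assets. A minimal sketch, assuming the exact paths used in the code above:

flutter:
  assets:
    - assets/images/portrait.png   # placeholder image shown before a photo is selected
    - assets/regula.license        # optional; omit it if you do not have a license file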


More hands-on tutorial series about using the flutter_face_api plugin in Flutter are also available at https://www.itying.com/category-92-b0.html

1 Reply



Here is example code showing how to use the flutter_face_api plugin for face recognition in a Flutter project. The plugin lets you perform basic face detection and recognition.

First, make sure you have added the flutter_face_api dependency to your pubspec.yaml file:

dependencies:
  flutter:
    sdk: flutter
  flutter_face_api: ^latest_version  # replace with the actual latest version number

Then run flutter pub get to install the dependency.

Next, we write a simple Flutter app that demonstrates face recognition with flutter_face_api.

1. Import the required packages

In your Dart file (e.g. main.dart), import the required packages:

import 'dart:io';  // for File
import 'package:flutter/material.dart';
import 'package:flutter_face_api/flutter_face_api.dart';
import 'package:image_picker/image_picker.dart';  // used to pick images from the device

2. Initialize the plugin and state

Create the Flutter app and initialize state variables to hold the selected image and the recognition results:

void main() {
  runApp(MyApp());
}

class MyApp extends StatelessWidget {
  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      home: FaceRecognitionScreen(),
    );
  }
}

class FaceRecognitionScreen extends StatefulWidget {
  @override
  _FaceRecognitionScreenState createState() => _FaceRecognitionScreenState();
}

class _FaceRecognitionScreenState extends State<FaceRecognitionScreen> {
  final picker = ImagePicker();
  File? imageFile;
  List<FaceDetectionResult>? faceResults;

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: Text('Flutter Face Recognition'),
      ),
      body: Center(
        child: Column(
          mainAxisAlignment: MainAxisAlignment.center,
          children: <Widget>[
            imageFile == null
                ? Text('No image selected.')
                : Image.file(imageFile!, width: 300, height: 300),
            SizedBox(height: 20),
            ElevatedButton(
              onPressed: () => _pickImage(context),
              child: Text('Pick Image'),
            ),
            SizedBox(height: 20),
            if (faceResults != null)
              Text('Number of faces detected: ${faceResults!.length}'),
          ],
        ),
      ),
    );
  }

  Future<void> _pickImage(BuildContext context) async {
    final pickedFile = await picker.pickImage(source: ImageSource.camera);

    if (pickedFile != null) {
      setState(() {
        imageFile = File(pickedFile.path);
        _detectFaces(imageFile!);
      });
    }
  }

  Future<void> _detectFaces(File imageFile) async {
    try {
      // NOTE: FaceDetector / detectFaces are used here for illustration;
      // check the plugin's official documentation for the exact detection API.
      final faceDetector = FaceDetector();
      faceResults = await faceDetector.detectFaces(imageFile);
      setState(() {});
    } catch (e) {
      print('Error detecting faces: $e');
    }
  }
}
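If the detection call above does not match the API exposed by your version of flutter_face_api, the calls demonstrated in the full example earlier in this thread (MatchFacesRequest, matchFaces, splitComparedFaces) are an alternative starting point. A minimal sketch, assuming you already have two face images as byte arrays:

import 'dart:typed_data';
import 'package:flutter_face_api/flutter_face_api.dart';

// Compare two face images using the calls shown in the full example above.
// This is a sketch, not the only workflow the plugin supports.
Future<void> compareFaces(Uint8List first, Uint8List second) async {
  var request = MatchFacesRequest([
    MatchFacesImage(first, ImageType.PRINTED),
    MatchFacesImage(second, ImageType.PRINTED),
  ]);
  var response = await FaceSDK.instance.matchFaces(request);
  // Split the compared pairs into matched/unmatched using a 0.75 similarity threshold.
  var split = await FaceSDK.instance.splitComparedFaces(response.results, 0.75);
  print('Matched face pairs: ${split.matchedFaces.length}');
}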

3. Request permissions

On Android you need camera and storage permissions. Add the following permissions to AndroidManifest.xml:

<uses-permission android:name="android.permission.CAMERA" />
<uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE" />
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />

Then request these permissions in MainActivity.kt (or MainActivity.java). If you are using Kotlin, you can do it like this:

import android.Manifest
import android.content.pm.PackageManager
import android.os.Bundle
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import io.flutter.embedding.android.FlutterActivity

class MainActivity: FlutterActivity() {
    private val REQUEST_IMAGE_CAPTURE = 1
    private val REQUEST_CAMERA_PERMISSION = 200

    override fun onRequestPermissionsResult(
        requestCode: Int,
        permissions: Array<out String>,
        grantResults: IntArray
    ) {
        super.onRequestPermissionsResult(requestCode, permissions, grantResults)
        if (requestCode == REQUEST_CAMERA_PERMISSION) {
            if ((grantResults.isNotEmpty() && grantResults[0] == PackageManager.PERMISSION_GRANTED)) {
                // permission granted
            } else {
                // permission denied
            }
        }
    }

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)

        if (ContextCompat.checkSelfPermission(
                this,
                Manifest.permission.CAMERA
            ) != PackageManager.PERMISSION_GRANTED
        ) {
            ActivityCompat.requestPermissions(
                this,
                arrayOf(Manifest.permission.CAMERA),
                REQUEST_CAMERA_PERMISSION
            )
        }
    }
}
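Alternatively, the runtime permission request can be handled from Dart. A minimal sketch using the permission_handler package (an assumption here; it is a separate dependency, not part of flutter_face_api):

import 'package:permission_handler/permission_handler.dart';  // add permission_handler to pubspec.yaml

// Request the camera permission before opening the camera or the face capture UI.
Future<bool> ensureCameraPermission() async {
  final status = await Permission.camera.request();
  return status.isGranted;
}

Call it before _pickImage (or startFaceCapture) and skip the capture flow if it returns false.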

4. Run the app

Now you can run your Flutter app, capture an image, and the app will use flutter_face_api to detect the faces in it.

Note that this is only a basic example; the flutter_face_api plugin may offer more advanced features such as face recognition and facial landmark detection. Consult the plugin's official documentation for more information and advanced usage.
