Using the apple_vision_face Flutter face detection plugin

apple_vision_face


Apple Vision Face Detection is a Flutter plugin that lets Flutter applications use Apple's Vision framework for face detection.

  • This plugin is not sponsored or maintained by Apple. It was created by developers who wanted a Google ML Kit-like experience on macOS.

Requirements

macOS

  • Minimum macOS deployment target: 11.0
  • Xcode 13 or newer
  • Swift 5
  • Only 64-bit architectures are supported (x86_64 and arm64)

iOS

  • Minimum iOS deployment target: 13.0
  • Xcode 13 or newer
  • Swift 5
  • Only 64-bit architectures are supported (x86_64 and arm64)

Getting Started

First, import 'package:apple_vision_face/apple_vision_face.dart';

final GlobalKey cameraKey = GlobalKey(debugLabel: "cameraKey");
late AppleVisionFaceController cameraController;
late List<CameraMacOSDevice> _cameras;
CameraMacOSController? controller;
String? deviceId;

FaceData? faceData;

@override
void initState() {
  cameraController = AppleVisionFaceController();
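  // List the available macOS video devices and use the first one found.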
  CameraMacOS.instance.listDevices(deviceType: CameraMacOSDeviceType.video).then((value){
    _cameras = value;
    deviceId = _cameras.first.deviceId;
  });
  super.initState();
}

@override
void dispose() {
  controller?.destroy();
  super.dispose();
}

void onTakePictureButtonPressed() async{
  CameraMacOSFile? file = await controller?.takePicture();
  if(file != null && mounted) {
    Uint8List? image = file.bytes;
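    // Run Apple Vision face detection on the captured frame, passing 640x480 as the image size.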
    cameraController.process(image!, const Size(640,480)).then((data){
      faceData = data;
      setState(() {});
    });
  }
}

@override
Widget build(BuildContext context) {
  deviceWidth = MediaQuery.of(context).size.width;
  deviceHeight = MediaQuery.of(context).size.height;
  return Stack(
    children:[
      SizedBox(
        width: 640, 
        height: 480, 
        child: _getScanWidgetByPlatform()
      ),
    ] + showPoints()
  );
}

List<Widget> showPoints(){
  if(faceData == null || faceData!.marks.isEmpty) return [];
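  // Map each landmark type to the color used for its overlay dot.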
  Map<LandMark, Color> colors = {
    LandMark.faceContour: Colors.amber,
    LandMark.outerLips: Colors.red,
    LandMark.innerLips: Colors.pink,
    LandMark.leftEye: Colors.green,
    LandMark.rightEye: Colors.green,
    LandMark.leftPupil: Colors.purple,
    LandMark.rightPupil: Colors.purple,
    LandMark.leftEyebrow: Colors.lime,
    LandMark.rightEyebrow: Colors.lime,
  };
  List<Widget> widgets = [];

  for(int i = 0; i < faceData!.marks.length; i++){
    List<Point> points = faceData!.marks[i].location;
    for(int j = 0; j < points.length; j++){
      widgets.add(
        Positioned(
          left: points[j].x,
          bottom: points[j].y,
          child: Container(
            width: 10,
            height: 10,
            decoration: BoxDecoration(
              color: colors[faceData!.marks[i].landmark],
              borderRadius: BorderRadius.circular(5)
            ),
          )
        )
      );
    }
  }
  return widgets;
}

Widget _getScanWidgetByPlatform() {
  return CameraMacOSView(
    key: cameraKey,
    fit: BoxFit.fill,
    cameraMode: CameraMacOSMode.photo,
    enableAudio: false,
    onCameraLoading: (ob){
      return Container(
        width: deviceWidth,
        height: deviceHeight,
        color: Theme.of(context).canvasColor,
        alignment: Alignment.center,
        child: const CircularProgressIndicator(color: Colors.blue)
      );
    },
    onCameraInizialized: (CameraMacOSController controller) {
      setState(() {
        this.controller = controller;
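        // Capture a frame roughly every 32 ms (~30 fps); note this timer is never cancelled in dispose().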
        Timer.periodic(const Duration(milliseconds: 32),(_){
          onTakePictureButtonPressed();
        });
      });
    },
  );
}

Example

See the full example of this API below.

import 'package:apple_vision_face/apple_vision_face.dart';
import 'package:flutter/material.dart';
import '../camera/camera_insert.dart';
import 'package:flutter/foundation.dart';
import 'package:flutter/services.dart';
import 'camera/input_image.dart';

void main() {
  runApp(const MyApp());
}

class MyApp extends StatelessWidget {
  const MyApp({super.key});

  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      title: 'Flutter Demo',
      theme: ThemeData(
        primarySwatch: Colors.blue,
      ),
      home: const VisionFace(),
    );
  }
}

class VisionFace extends StatefulWidget {
  const VisionFace({
    Key? key,
    this.onScanned
  }):super(key: key);

  final Function(dynamic data)? onScanned; 

  @override
  _VisionFace createState() => _VisionFace();
}

class _VisionFace extends State<VisionFace> {
  final GlobalKey cameraKey = GlobalKey(debugLabel: "cameraKey");
  AppleVisionFaceController visionController = AppleVisionFaceController();
  InsertCamera camera = InsertCamera();
  String? deviceId;
  bool loading = true;
  Size imageSize = const Size(640,640*9/16);

  List<FaceData>? faceData;
  late double deviceWidth;
  late double deviceHeight;

  @override
  void initState() {
    camera.setupCameras().then((value){
      setState(() {
        loading = false;
      });
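      // Forward each frame from the live camera feed to the Vision controller for detection.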
      camera.startLiveFeed((InputImage i){
        if(i.metadata?.size != null){
          imageSize = i.metadata!.size;
        }
        if(mounted) {
          Uint8List? image = i.bytes;
          visionController.processImage(image!, i.metadata!.size).then((data){
            faceData = data;
            setState(() {});
          });
        }
      });
    });
    super.initState();
  }

  @override
  void dispose() {
    camera.dispose();
    super.dispose();
  }

  @override
  Widget build(BuildContext context) {
    deviceWidth = MediaQuery.of(context).size.width;
    deviceHeight = MediaQuery.of(context).size.height;
    return Stack(
      children:[
        SizedBox(
          width: imageSize.width, 
          height: imageSize.height, 
          child: loading ? Container() : CameraSetup(camera: camera, size: imageSize,)
        ),
      ] + showPoints()
    );
  }

  List<Widget> showPoints(){
    if(faceData == null || faceData!.isEmpty) return [];
    List<Widget> widgets = [];
    Map<LandMark, Color> colors = {
      LandMark.faceContour: Colors.amber,
      LandMark.outerLips: Colors.red,
      LandMark.innerLips: Colors.pink,
      LandMark.leftEye: Colors.green,
      LandMark.rightEye: Colors.green,
      LandMark.leftPupil: Colors.purple,
      LandMark.rightPupil: Colors.purple,
      LandMark.leftEyebrow: Colors.lime,
      LandMark.rightEyebrow: Colors.lime,
    };

    for(int k = 0; k < faceData!.length; k++){
      if(faceData![k].marks.isNotEmpty){
        for(int i = 0; i < faceData![k].marks.length; i++){
          List<FacePoint> points = faceData![k].marks[i].location;
          for(int j = 0; j < points.length; j++){
            widgets.add(
              Positioned(
                left: points[j].x,
                top: points[j].y,
                child: Container(
                  width: 10,
                  height: 10,
                  decoration: BoxDecoration(
                    color: colors[faceData![k].marks[i].landmark],
                    borderRadius: BorderRadius.circular(5)
                  ),
                )
              )
            );
          }
        }
      }
    }
    return widgets;
  }

  Widget loadingWidget(){
    return Container(
      width: deviceWidth,
      height: deviceHeight,
      color: Theme.of(context).canvasColor,
      alignment: Alignment.center,
      child: const CircularProgressIndicator(color: Colors.blue)
    );
  }
}

More hands-on tutorials on using the apple_vision_face Flutter face detection plugin are available at https://www.itying.com/category-92-b0.html

1 reply



Sure, below is a sample showing how to use the apple_vision_face plugin for face detection in a Flutter app. Note that this plugin only works on Apple platforms (iOS and macOS), because it relies on Apple's Vision framework.

First, make sure you have added the apple_vision_face dependency to your pubspec.yaml file:

dependencies:
  flutter:
    sdk: flutter
  apple_vision_face: ^x.y.z  # replace with the latest version number

Then run flutter pub get to install the dependency.

Next, you can use the following code in your Flutter project to implement face detection:

import 'dart:async';
import 'dart:io';
import 'dart:typed_data';
import 'dart:ui' as ui;

import 'package:apple_vision_face/apple_vision_face.dart';
import 'package:flutter/material.dart';
import 'package:image_picker/image_picker.dart';

void main() {
  runApp(MyApp());
}

class MyApp extends StatelessWidget {
  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      home: FaceDetectionScreen(),
    );
  }
}

class FaceDetectionScreen extends StatefulWidget {
  @override
  _FaceDetectionScreenState createState() => _FaceDetectionScreenState();
}

class _FaceDetectionScreenState extends State<FaceDetectionScreen> {
  Uint8List? _imageBytes;
  List<FaceRectangle>? _faceRectangles;

  Future<void> _pickImage() async {
    final pickedFile = await ImagePicker().pickImage(source: ImageSource.camera);
    if (pickedFile != null) {
      final File imageFile = File(pickedFile.path);
      _imageBytes = await imageFile.readAsBytes();
      _detectFaces();
    }
  }

  Future<void> _detectFaces() async {
    if (_imageBytes != null) {
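      // Decode the captured bytes into a ui.Image, then re-encode them as PNG byte data for detection.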
      final imageProvider = MemoryImage(_imageBytes!);
      final Completer<ui.Image> completer = Completer();
      imageProvider.image.resolve(ImageConfiguration()).addListener(
        ImageStreamListener((ImageInfo imageInfo, bool synchronousCall) {
          final ui.Image uiImage = imageInfo.image;
          completer.complete(uiImage);
        }),
      );

      final ui.Image uiImage = await completer.future;
      final ByteData? byteData = await uiImage.toByteData(format: ui.ImageByteFormat.png);
      if (byteData != null) {
        final faceRectangles = await AppleVisionFace.detectFaces(byteData);
        setState(() {
          _faceRectangles = faceRectangles;
        });
      }
    }
  }

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: Text('Face Detection'),
      ),
      body: Center(
        child: Column(
          mainAxisAlignment: MainAxisAlignment.center,
          children: <Widget>[
            if (_imageBytes != null)
              SizedBox(
                width: double.infinity,
                height: 400,
                child: Stack(
                  fit: StackFit.expand,
                  children: [
                    Image.memory(
                      _imageBytes!,
                      fit: BoxFit.cover,
                    ),
                    // Paint the detected face rectangles over the captured image.
                    CustomPaint(
                      painter: FaceOverlayPainter(_faceRectangles),
                    ),
                  ],
                ),
              ),
            ElevatedButton(
              onPressed: _pickImage,
              child: Text('Capture Image'),
            ),
          ],
        ),
      ),
    );
  }
}

class FaceOverlayPainter extends CustomPainter {
  final List<FaceRectangle>? faceRectangles;

  FaceOverlayPainter(this.faceRectangles);

  @override
  void paint(Canvas canvas, Size size) {
    final Paint paint = Paint()
      ..color = Colors.red
      ..style = PaintingStyle.stroke
      ..strokeWidth = 4.0;

    if (faceRectangles != null) {
      for (var face in faceRectangles!) {
        final Rect rect = Rect.fromLTWH(
          face.origin.dx * size.width,
          face.origin.dy * size.height,
          face.size.width * size.width,
          face.size.height * size.height,
        );
        canvas.drawRect(rect, paint);
      }
    }
  }

  @override
  bool shouldRepaint(covariant CustomPainter oldDelegate) {
    return oldDelegate != this;
  }
}

In this example, the image_picker plugin is used to capture an image from the camera (add image_picker to your pubspec.yaml as well). The image is then converted to byte data and passed to AppleVisionFace.detectFaces to detect faces, and each detected face region is outlined with a red rectangle.
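Note that the AppleVisionFace.detectFaces and FaceRectangle names above are taken from this reply as written; the Getting Started example earlier in this article goes through an AppleVisionFaceController and its processImage method instead. A minimal sketch of the detection step rewritten against that controller API (the helper name detectFacesWithController is hypothetical; AppleVisionFaceController, processImage, and FaceData are the names used in the example above):

import 'dart:typed_data';
import 'dart:ui';

import 'package:apple_vision_face/apple_vision_face.dart';

// Sketch only: run detection through the controller API shown in the
// Getting Started section rather than a static detectFaces call.
Future<List<FaceData>?> detectFacesWithController(
    Uint8List imageBytes, Size imageSize) async {
  final AppleVisionFaceController visionController = AppleVisionFaceController();
  // processImage takes the raw image bytes plus the image's pixel size and
  // resolves with the detected faces and their landmark points.
  return visionController.processImage(imageBytes, imageSize);
}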

Make sure your ios/Podfile contains the following configuration so that Swift code is supported (the apple_vision_face plugin is implemented in Swift):

platform :ios, '13.0'  # apple_vision_face requires an iOS deployment target of 13.0 or newer

# CocoaPods analytics sends network stats synchronously affecting flutter build latency.
ENV['COCOAPODS_DISABLE_STATS'] = 'true'

project 'Runner', {
  'Debug': :debug,
  'Profile': :release,
  'Release': :release,
}

def flutter_root
  generated_xcode_build_settings_path = File.expand_path(File.join('..', 'Flutter', 'Generated.xcconfig'), __FILE__)
  unless File.exist?(generated_xcode_build_settings_path)
    raise "#{generated_xcode_build_settings_path} must exist. If you're running pod install manually, make sure flutter pub get is executed first"
  end

  File.foreach(generated_xcode_build_settings_path) do |line|
    matches = line.match(/FLUTTER_ROOT\=(.*)/)
    return File.expand_path(matches[1], __FILE__) if matches
  end
  raise "FLUTTER_ROOT not found in #{generated_xcode_build_settings_path}. Try running flutter pub get in the project root."
end

require File.expand_path(File.join('packages', 'flutter_tools', 'bin', 'podhelper'), flutter_root)

flutter_ios_podfile_setup

target 'Runner' do
  use_frameworks!
  use_modular_headers!

  flutter_install_all_ios_pods File.dirname(File.realpath(__FILE__))
end

post_install do |installer|
  installer.pods_project.targets.each do |target|
    flutter_additional_ios_build_settings(target)
    target.build_configurations.each do |config|
      config.build_settings['SWIFT_VERSION'] = '5.0'  # make sure the Swift version is compatible with the plugin
    end
  end
end

The code above covers the complete flow, from capturing an image to detecting faces and drawing rectangles over them. Be sure to test everything and adjust as needed before deploying to production.
