Flutter人体姿态识别插件apple_vision_pose的使用

Flutter人体姿态识别插件apple_vision_pose的使用

apple_vision_pose

Pub Version analysis Star on Github License: MIT

Apple Vision Pose Detection 是一个 Flutter 插件,使 Flutter 应用能够使用 Apple Vision 框架的人体姿态检测功能。

  • 此插件不是由 Apple 赞助或维护。作者是一些希望为 macOS 制作类似 Google ML Kit 的开发者。

要求

MacOS

  • 最低 osx 部署目标:11.0
  • Xcode 13 或更新版本
  • Swift 5
  • 该插件只支持 64 位架构(x86_64 和 arm64)

iOS

  • 最低 ios 部署目标:14.0
  • Xcode 13 或更新版本
  • Swift 5
  • 该插件只支持 64 位架构(x86_64 和 arm64)

开始使用

首先需要导入 'package:apple_vision_pose/apple_vision_pose.dart':

// Key identifying the camera preview widget across rebuilds.
final GlobalKey cameraKey = GlobalKey(debugLabel: "cameraKey");
// Sends captured frames to Apple Vision for pose detection.
late AppleVisionPoseController cameraController;
// Cameras discovered on the device; populated asynchronously in initState.
late List<CameraMacOSDevice> _cameras;
// Live camera controller; null until onCameraInizialized fires.
CameraMacOSController? controller;
// Id of the selected camera; null until device discovery completes.
String? deviceId;

// Most recent pose-detection result; null until the first frame is processed.
PoseData? poseData;
// Screen dimensions, refreshed on every build.
late double deviceWidth;
late double deviceHeight;

/// Initializes the vision controller and discovers available video cameras.
///
/// Device discovery is asynchronous; [deviceId] remains null until the
/// lookup completes.
@override
void initState() {
  // Call super first, per the State lifecycle convention.
  super.initState();
  cameraController = AppleVisionPoseController();
  CameraMacOS.instance.listDevices(deviceType: CameraMacOSDeviceType.video).then((value) {
    _cameras = value;
    // NOTE(review): assumes at least one video device exists — `.first`
    // throws on an empty list; confirm behavior on camera-less machines.
    deviceId = _cameras.first.deviceId;
  });
}

/// Releases the camera controller when the widget leaves the tree.
@override
void dispose() {
  controller?.destroy();
  super.dispose();
}

/// Captures a still frame and runs Apple Vision pose detection on it.
///
/// Silently skips frames that carry no bytes instead of force-unwrapping,
/// and only refreshes the UI while the widget is still mounted.
void onTakePictureButtonPressed() async {
  CameraMacOSFile? file = await controller?.takePicture();
  if (file != null && mounted) {
    Uint8List? image = file.bytes;
    // A capture can yield null bytes; `image!` here would crash.
    if (image == null) return;
    cameraController.process(image, const Size(640, 480)).then((data) {
      poseData = data;
      if (mounted) setState(() {});
    });
  }
}

/// Lays out the camera preview with detected pose markers stacked on top.
@override
Widget build(BuildContext context) {
  deviceWidth = MediaQuery.of(context).size.width;
  deviceHeight = MediaQuery.of(context).size.height;
  return Stack(
    children: [
      SizedBox(
        width: 640,
        height: 480,
        child: _getScanWidgetByPlatform()
      ),
      // One Positioned dot per confident joint.
      ...showPoints(),
    ]
  );
}

/// Builds one colored 10x10 dot per detected joint whose confidence
/// exceeds 0.5, positioned at the joint's reported location.
List<Widget> showPoints() {
  final data = poseData;
  if (data == null || data.poses.isEmpty) return [];

  // Marker color per joint, grouped by body region.
  final colors = <Joint, Color>{
    Joint.rightFoot: Colors.orange,
    Joint.rightLeg: Colors.orange,
    Joint.rightUpLeg: Colors.orange,
    Joint.rightHand: Colors.purple,
    Joint.rightForearm: Colors.purple,
    Joint.nose: Colors.purple,
    Joint.neck: Colors.pink,
    Joint.rightShoulder: Colors.pink,
    Joint.leftShoulder: Colors.pink,
    Joint.leftForearm: Colors.indigo,
    Joint.leftHand: Colors.indigo,
    Joint.leftUpLeg: Colors.grey,
    Joint.leftLeg: Colors.grey,
    Joint.leftFoot: Colors.grey,
    Joint.root: Colors.yellow,
    Joint.leftEye: Colors.cyanAccent,
    Joint.leftEar: Colors.cyanAccent,
    Joint.rightEar: Colors.cyanAccent,
    Joint.rightEye: Colors.cyanAccent,
    Joint.head: Colors.cyanAccent,
  };

  return [
    for (final pose in data.poses)
      if (pose.confidence > 0.5)
        Positioned(
          bottom: pose.location.y,
          left: pose.location.x,
          child: Container(
            width: 10,
            height: 10,
            decoration: BoxDecoration(
              color: colors[pose.joint],
              borderRadius: BorderRadius.circular(5),
            ),
          ),
        ),
  ];
}

/// Builds the macOS camera preview widget.
///
/// Shows a centered spinner while the camera loads; once initialized,
/// stores the controller and starts polling a frame roughly every 128 ms.
Widget _getScanWidgetByPlatform() {
  return CameraMacOSView(
    key: cameraKey,
    fit: BoxFit.fill,
    cameraMode: CameraMacOSMode.photo,
    enableAudio: false,
    onCameraLoading: (ob) {
      return Container(
        width: deviceWidth,
        height: deviceHeight,
        color: Theme.of(context).canvasColor,
        alignment: Alignment.center,
        child: const CircularProgressIndicator(color: Colors.blue)
      );
    },
    onCameraInizialized: (CameraMacOSController controller) {
      setState(() {
        this.controller = controller;
      });
      // Timer creation has no UI effect, so it belongs outside setState.
      // NOTE(review): this timer is never stored or cancelled, so it keeps
      // firing after dispose — keep it in a Timer field and cancel it in
      // dispose().
      Timer.periodic(const Duration(milliseconds: 128), (_) {
        onTakePictureButtonPressed();
      });
    },
  );
}

示例

完整的示例代码可以在以下链接找到:main.dart

import 'package:apple_vision_pose/apple_vision_pose.dart';
import 'package:flutter/material.dart';
import '../camera/camera_insert.dart';
import 'package:flutter/foundation.dart';
import 'package:flutter/services.dart';
import 'camera/input_image.dart';

/// Application entry point.
void main() => runApp(const MyApp());

/// Application root that hosts the [VisionPose] demo page.
class MyApp extends StatelessWidget {
  const MyApp({super.key});

  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      title: 'Flutter Demo',
      theme: ThemeData(
        primarySwatch: Colors.blue,
      ),
      home: const VisionPose(),
    );
  }
}

/// Page that streams camera frames through Apple Vision pose detection
/// and overlays the detected joints on the preview.
class VisionPose extends StatefulWidget {
  const VisionPose({
    Key? key,
    this.onScanned
  }) : super(key: key);

  /// Optional callback for scan results (not invoked in this example).
  final Function(dynamic data)? onScanned;

  @override
  _VisionPose createState() => _VisionPose();
}

/// State for [VisionPose]: owns the live camera feed and the latest
/// pose-detection results.
class _VisionPose extends State<VisionPose> {
  final GlobalKey cameraKey = GlobalKey(debugLabel: "cameraKey");
  late AppleVisionPoseController visionController = AppleVisionPoseController();
  InsertCamera camera = InsertCamera();
  // Fallback frame size (16:9 at 640 px wide) until metadata arrives.
  Size imageSize = const Size(640, 640 * 9 / 16);
  String? deviceId;
  bool loading = true;

  // Latest detection results; null until the first frame is processed.
  List<PoseData>? poseData;
  late double deviceWidth;
  late double deviceHeight;

  // Marker color per joint, grouped by body region. Built once instead of
  // being reallocated on every showPoints() call (i.e. every frame).
  static final Map<Joint, Color> _jointColors = {
    Joint.rightFoot: Colors.orange,
    Joint.rightLeg: Colors.orange,
    Joint.rightUpLeg: Colors.orange,
    Joint.rightHand: Colors.purple,
    Joint.rightForearm: Colors.purple,
    Joint.nose: Colors.purple,
    Joint.neck: Colors.pink,
    Joint.rightShoulder: Colors.pink,
    Joint.leftShoulder: Colors.pink,
    Joint.leftForearm: Colors.indigo,
    Joint.leftHand: Colors.indigo,
    Joint.leftUpLeg: Colors.grey,
    Joint.leftLeg: Colors.grey,
    Joint.leftFoot: Colors.grey,
    Joint.root: Colors.yellow,
    Joint.leftEye: Colors.cyanAccent,
    Joint.leftEar: Colors.cyanAccent,
    Joint.rightEar: Colors.cyanAccent,
    Joint.rightEye: Colors.cyanAccent,
    Joint.head: Colors.cyanAccent,
  };

  /// Sets up the camera, then starts the live feed: each frame is sent to
  /// Apple Vision and the resulting poses trigger a rebuild.
  @override
  void initState() {
    super.initState();
    camera.setupCameras().then((value) {
      setState(() {
        loading = false;
      });
      camera.startLiveFeed((InputImage i) {
        if (i.metadata?.size != null) {
          imageSize = i.metadata!.size;
        }
        if (!mounted) return;
        final Uint8List? image = i.bytes;
        // Frames can arrive without bytes; `image!` here would crash.
        if (image == null) return;
        // NOTE(review): `i.metadata!` assumes metadata is always present on
        // frames that carry bytes — confirm against InsertCamera's contract.
        visionController.processImage(image, i.metadata!.size).then((data) {
          poseData = data;
          if (mounted) setState(() {});
        });
      });
    });
  }

  /// Stops the live feed and releases camera resources.
  @override
  void dispose() {
    camera.dispose();
    super.dispose();
  }

  /// Lays out the camera preview with detected pose markers stacked on top.
  @override
  Widget build(BuildContext context) {
    deviceWidth = MediaQuery.of(context).size.width;
    deviceHeight = MediaQuery.of(context).size.height;
    return Stack(
      children: [
        SizedBox(
          width: imageSize.width,
          height: imageSize.height,
          child: loading ? Container() : CameraSetup(camera: camera, size: imageSize),
        ),
        ...showPoints(),
      ]
    );
  }

  /// Builds one colored 10x10 dot per detected joint whose confidence
  /// exceeds 0.3, converting Vision's bottom-left origin to screen
  /// coordinates via `imageSize.height - y`.
  List<Widget> showPoints() {
    final data = poseData;
    if (data == null || data.isEmpty) return [];
    return [
      for (final person in data)
        for (final pose in person.poses)
          if (pose.confidence > 0.3)
            Positioned(
              top: imageSize.height - pose.location.y,
              left: pose.location.x,
              child: Container(
                width: 10,
                height: 10,
                decoration: BoxDecoration(
                  color: _jointColors[pose.joint],
                  borderRadius: BorderRadius.circular(5),
                ),
              ),
            ),
    ];
  }
}

更多关于Flutter人体姿态识别插件apple_vision_pose的使用的实战教程也可以访问 https://www.itying.com/category-92-b0.html

1 回复

更多关于Flutter人体姿态识别插件apple_vision_pose的使用的实战系列教程也可以访问 https://www.itying.com/category-92-b0.html


当然,以下是如何在Flutter项目中集成和使用apple_vision_pose插件进行人体姿态识别的代码示例。apple_vision_pose插件是专门为iOS设备设计的,它利用Apple的Vision框架来实现人体姿态识别功能。

前提条件

  1. 确保你已经在Flutter环境中设置了iOS开发环境。
  2. 确保你的ios/Podfile中设置了合适的iOS版本(至少iOS 14.0,与上文的插件要求一致)。

步骤

  1. 添加依赖

在你的pubspec.yaml文件中添加apple_vision_pose依赖:

dependencies:
  flutter:
    sdk: flutter
  apple_vision_pose: ^x.y.z  # 请替换为最新版本号

然后运行flutter pub get来安装依赖。

  1. 配置iOS项目

确保你的Info.plist文件中包含必要的权限请求,比如相机权限:

<key>NSCameraUsageDescription</key>
<string>需要访问相机以进行人体姿态识别</string>
  1. 使用插件

在你的Flutter项目中,你可以使用以下代码来捕获相机图像并进行人体姿态识别。

import 'package:flutter/material.dart';
import 'package:apple_vision_pose/apple_vision_pose.dart';
import 'package:camera/camera.dart';

// Cameras discovered at startup; assigned in main() before runApp.
// `late` is required under null safety: a non-nullable top-level variable
// must either be initialized or declared late.
late List<CameraDescription> cameras;
late CameraController controller;

/// Entry point: resolves the device cameras, initializes the controller,
/// then launches the app.
Future<void> main() async {
  WidgetsFlutterBinding.ensureInitialized();
  // availableCameras() returns a Future<List<CameraDescription>> — it must
  // be awaited; assigning it directly to a List is a compile error.
  cameras = await availableCameras();
  controller = CameraController(cameras[0], ResolutionPreset.high);
  await controller.initialize();
  runApp(MyApp());
}

/// Application root for the camera-based pose recognition demo.
class MyApp extends StatelessWidget {
  // const constructor lets callers build this widget as a compile-time
  // constant; accepting a key is standard widget practice.
  const MyApp({super.key});

  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      home: CameraPreviewPage(),
    );
  }
}

/// Page showing the live camera preview with the latest pose result below.
class CameraPreviewPage extends StatefulWidget {
  // const constructor + key parameter, per standard widget conventions.
  const CameraPreviewPage({super.key});

  @override
  _CameraPreviewPageState createState() => _CameraPreviewPageState();
}

/// State for [CameraPreviewPage]: holds the latest recognition result.
class _CameraPreviewPageState extends State<CameraPreviewPage> {
  // Latest pose result; null until the first frame has been processed.
  // Was `late PoseResult?`, which throws when build() reads it before any
  // frame arrives — a plain nullable field is safe and defaults to null.
  PoseResult? poseResult;

  @override
  void dispose() {
    controller.dispose();
    super.dispose();
  }

  /// Runs Apple Vision pose detection on one camera frame and refreshes
  /// the UI with the result.
  ///
  /// NOTE(review): nothing in this class invokes _processImage — wire it
  /// to controller.startImageStream to actually receive frames.
  Future<void> _processImage(CameraImage image) async {
    final planes = image.planes;
    // Collect the raw bytes of each image plane for the platform channel.
    final writeBuffer = planes.map((plane) {
      return plane.bytes;
    }).toList();

    try {
      poseResult = await AppleVisionPose.processImage(
        width: image.width,
        height: image.height,
        planes: writeBuffer,
      );
    } catch (e) {
      // Best-effort: a failed frame is logged and skipped, not fatal.
      print(e);
    }

    if (mounted) {
      setState(() {});
    }
  }

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: Text('人体姿态识别'),
      ),
      body: Column(
        children: <Widget>[
          Expanded(
            child: CameraPreview(controller),
          ),
          // Show results once available; a placeholder until then.
          if (poseResult != null)
            PoseResultWidget(poseResult!)
          else
            Container(
              height: 100,
              child: Center(child: Text('正在处理...')),
            ),
        ],
      ),
    );
  }
}

/// Renders the recognized keypoints as a list of labeled coordinates.
class PoseResultWidget extends StatelessWidget {
  /// The detection result to display.
  final PoseResult poseResult;

  // const constructor + key parameter, per standard widget conventions.
  const PoseResultWidget(this.poseResult, {super.key});

  @override
  Widget build(BuildContext context) {
    return Padding(
      padding: const EdgeInsets.all(8.0),
      child: Column(
        crossAxisAlignment: CrossAxisAlignment.start,
        children: <Widget>[
          Text('人体姿态识别结果:', style: TextStyle(fontWeight: FontWeight.bold)),
          // One line per keypoint: "label: (x, y)" with two decimals.
          ...poseResult.keypoints.map((keypoint) {
            return Text(
              '${keypoint.label}: (${keypoint.x.toStringAsFixed(2)}, ${keypoint.y.toStringAsFixed(2)})',
            );
          }),
        ],
      ),
    );
  }
}

注意

  1. 上面的代码使用了camera插件来捕获相机图像。你需要添加camera插件的依赖并在Info.plist中请求相机权限。
  2. apple_vision_pose插件目前只支持iOS平台,因此如果你在Android上运行该代码,它将不会工作。
  3. 确保在真机(iOS设备)上运行该应用,因为模拟器可能不支持所有Vision框架的功能。

希望这个示例能帮助你理解如何在Flutter项目中使用apple_vision_pose插件进行人体姿态识别。

回到顶部