Flutter苹果设备视觉提升主体检测插件apple_vision_lift_subjects的使用
Apple Vision Lift Subject
是一个 Flutter 插件,使 Flutter 应用能够在苹果设备上使用 Apple Vision Lift Subject。
要求
MacOS
- 最低操作系统版本:14.0
- Xcode 15 或更新版本
- Swift 5
- 本插件只支持 64 位架构(x86_64 和 arm64)
iOS
- 开发阶段不支持
- 最低操作系统版本:17.0
- Xcode 15 或更新版本
- Swift 5
- 本插件只支持 64 位架构(x86_64 和 arm64)
开始使用
首先,你需要导入 apple_vision_lift_subjects
包:
import 'package:apple_vision_lift_subjects/apple_vision_lift_subjects.dart';
接下来,你可以初始化控制器并处理图像数据。以下是一个完整的示例代码:
import 'package:flutter/material.dart';
import 'package:flutter/services.dart';
import 'package:apple_vision_lift_subjects/apple_vision_lift_subjects.dart';
/// Application entry point: boots the demo app.
void main() => runApp(const MyApp());
/// Root widget that wires up the [MaterialApp] and opens the demo page.
class MyApp extends StatelessWidget {
  const MyApp({super.key});

  // NOTE: the original article text had `@override` mangled into a
  // markdown link; restored to valid Dart here.
  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      title: 'Flutter Demo',
      theme: ThemeData(
        primarySwatch: Colors.blue,
      ),
      home: const VisionLiftSubjects(),
    );
  }
}
/// Demo page that runs Apple Vision "lift subjects" on bundled images.
class VisionLiftSubjects extends StatefulWidget {
  const VisionLiftSubjects({
    super.key,
    this.onScanned,
  });

  /// Optional callback invoked with scan results.
  final Function(dynamic data)? onScanned;

  // Return `State<VisionLiftSubjects>` instead of the private state type so
  // the public API does not leak a library-private type
  // (library_private_types_in_public_api lint).
  @override
  State<VisionLiftSubjects> createState() => _VisionLiftSubjects();
}
/// State for [VisionLiftSubjects].
///
/// Loads three bundled assets, runs the native lift-subjects pipeline on
/// each, and renders the results. The last result is tappable: a tap
/// re-runs the lift with a normalized touch point.
class _VisionLiftSubjects extends State<VisionLiftSubjects> {
  final GlobalKey cameraKey = GlobalKey(debugLabel: "cameraKey");

  /// Bridge to the native Apple Vision lift-subjects API.
  late AppleVisionliftSubjectsController visionController =
      AppleVisionliftSubjectsController();

  Size imageSize = const Size(640, 640 * 9 / 16);
  String? deviceId;
  bool loading = true;

  /// Lifted-subject results shown in the Wrap.
  List<Uint8List?> images = [];

  late double deviceWidth;
  late double deviceHeight;

  /// Background bytes composited behind the lifted human subject.
  Uint8List? bg;

  /// Source bytes for the tappable flowers demo.
  Uint8List? flowers;

  /// Results produced by tapping (touch-point based lifts).
  List<Uint8List?> sepImages = [];

  /// Last tap position, normalized to 0..1 in both axes.
  Point? point;

  @override
  void initState() {
    super.initState(); // call first, per Flutter convention
    // Load the background BEFORE starting any processing. The original
    // code called processImages() immediately, so the human.png composite
    // could race against a still-null `bg` and silently lose its background.
    rootBundle.load('assets/WaterOnTheMoonFull.jpg').then((value) {
      bg = value.buffer.asUint8List();
      processImages();
    });
  }

  /// Kicks off the three demo detections bundled with the example.
  void processImages() {
    // 1) Rose: crop the lifted subject out of the photo.
    rootBundle.load('assets/rose.jpg').then((value) {
      visionController
          .processImage(
        LiftedSubjectsData(
          image: value.buffer.asUint8List(),
          imageSize: const Size(640, 425),
          crop: true,
        ),
      )
          .then((value) {
        if (value != null) {
          images.add(value);
          setState(() {});
        }
      });
    });
    // 2) Human: lift the subject and composite it onto `bg`.
    rootBundle.load('assets/human.png').then((value) {
      visionController
          .processImage(
        LiftedSubjectsData(
          image: value.buffer.asUint8List(),
          imageSize: const Size(512, 512),
          backGround: bg,
        ),
      )
          .then((value) {
        if (value != null) {
          images.add(value);
          setState(() {});
        }
      });
    });
    // 3) Flowers: full-frame lift; tapping the result re-runs with a touch point.
    rootBundle.load('assets/flowers.jpg').then((value) {
      flowers = value.buffer.asUint8List();
      onTouch(false);
    });
  }

  /// Lifts the subject from the flowers image, optionally at [point].
  ///
  /// When [useSep] is true the result goes into [sepImages]; otherwise it
  /// is appended to [images].
  void onTouch(bool useSep) {
    final source = flowers;
    if (source == null) return; // asset not loaded yet (was `flowers!`)
    visionController
        .processImage(
      LiftedSubjectsData(
        image: source,
        imageSize: const Size(600, 400),
        crop: useSep,
        touchPoint: point,
      ),
    )
        .then((value) {
      if (value != null) {
        if (useSep) {
          sepImages.add(value);
        } else {
          images.add(value);
        }
        setState(() {});
      }
    });
  }

  /// Builds the image widgets: every processed image, with the last one
  /// wrapped in a GestureDetector so taps trigger a touch-point lift.
  List<Widget> showImages() {
    List<Widget> widgets = [];
    for (int i = 0; i < images.length; i++) {
      if (i == images.length - 1 && images[i] != null) {
        // Fixed display size so the tap position can be normalized to 0..1.
        double w = 600;
        double h = 400;
        widgets.add(
          SizedBox(
            width: w,
            height: h,
            child: GestureDetector(
              onTapDown: (td) {
                point = Point(
                  td.localPosition.dx / w,
                  td.localPosition.dy / h,
                );
                sepImages = []; // keep only the newest tap result
                onTouch(true);
              },
              child: Image.memory(
                images[i]!,
                fit: BoxFit.fitHeight,
              ),
            ),
          ),
        );
      } else if (images[i] != null) {
        widgets.add(
          Image.memory(
            images[i]!,
            fit: BoxFit.fitHeight,
          ),
        );
      }
    }
    for (int i = 0; i < sepImages.length; i++) {
      if (sepImages[i] != null) {
        widgets.add(
          Image.memory(
            sepImages[i]!,
            fit: BoxFit.fitHeight,
          ),
        );
      }
    }
    return widgets;
  }

  @override
  Widget build(BuildContext context) {
    deviceWidth = MediaQuery.of(context).size.width;
    deviceHeight = MediaQuery.of(context).size.height;
    return ListView(
      children: [
        Wrap(
          children: showImages(),
        ),
      ],
    );
  }
}
示例代码
以下是完整的示例代码:
import 'package:flutter/material.dart';
import 'package:flutter/services.dart';
import 'package:apple_vision_lift_subjects/apple_vision_lift_subjects.dart';
/// Application entry point: boots the demo app.
void main() => runApp(const MyApp());
/// Root widget that wires up the [MaterialApp] and opens the demo page.
class MyApp extends StatelessWidget {
  const MyApp({super.key});

  // NOTE: the original article text had `@override` mangled into a
  // markdown link; restored to valid Dart here.
  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      title: 'Flutter Demo',
      theme: ThemeData(
        primarySwatch: Colors.blue,
      ),
      home: const VisionLiftSubjects(),
    );
  }
}
/// Demo page that runs Apple Vision "lift subjects" on bundled images.
class VisionLiftSubjects extends StatefulWidget {
  const VisionLiftSubjects({
    super.key,
    this.onScanned,
  });

  /// Optional callback invoked with scan results.
  final Function(dynamic data)? onScanned;

  // Return `State<VisionLiftSubjects>` instead of the private state type so
  // the public API does not leak a library-private type
  // (library_private_types_in_public_api lint).
  @override
  State<VisionLiftSubjects> createState() => _VisionLiftSubjects();
}
/// State for [VisionLiftSubjects].
///
/// Loads three bundled assets, runs the native lift-subjects pipeline on
/// each, and renders the results. The last result is tappable: a tap
/// re-runs the lift with a normalized touch point.
class _VisionLiftSubjects extends State<VisionLiftSubjects> {
  final GlobalKey cameraKey = GlobalKey(debugLabel: "cameraKey");

  /// Bridge to the native Apple Vision lift-subjects API.
  late AppleVisionliftSubjectsController visionController =
      AppleVisionliftSubjectsController();

  Size imageSize = const Size(640, 640 * 9 / 16);
  String? deviceId;
  bool loading = true;

  /// Lifted-subject results shown in the Wrap.
  List<Uint8List?> images = [];

  late double deviceWidth;
  late double deviceHeight;

  /// Background bytes composited behind the lifted human subject.
  Uint8List? bg;

  /// Source bytes for the tappable flowers demo.
  Uint8List? flowers;

  /// Results produced by tapping (touch-point based lifts).
  List<Uint8List?> sepImages = [];

  /// Last tap position, normalized to 0..1 in both axes.
  Point? point;

  @override
  void initState() {
    super.initState(); // call first, per Flutter convention
    // Load the background BEFORE starting any processing. The original
    // code called processImages() immediately, so the human.png composite
    // could race against a still-null `bg` and silently lose its background.
    rootBundle.load('assets/WaterOnTheMoonFull.jpg').then((value) {
      bg = value.buffer.asUint8List();
      processImages();
    });
  }

  /// Kicks off the three demo detections bundled with the example.
  void processImages() {
    // 1) Rose: crop the lifted subject out of the photo.
    rootBundle.load('assets/rose.jpg').then((value) {
      visionController
          .processImage(
        LiftedSubjectsData(
          image: value.buffer.asUint8List(),
          imageSize: const Size(640, 425),
          crop: true,
        ),
      )
          .then((value) {
        if (value != null) {
          images.add(value);
          setState(() {});
        }
      });
    });
    // 2) Human: lift the subject and composite it onto `bg`.
    rootBundle.load('assets/human.png').then((value) {
      visionController
          .processImage(
        LiftedSubjectsData(
          image: value.buffer.asUint8List(),
          imageSize: const Size(512, 512),
          backGround: bg,
        ),
      )
          .then((value) {
        if (value != null) {
          images.add(value);
          setState(() {});
        }
      });
    });
    // 3) Flowers: full-frame lift; tapping the result re-runs with a touch point.
    rootBundle.load('assets/flowers.jpg').then((value) {
      flowers = value.buffer.asUint8List();
      onTouch(false);
    });
  }

  /// Lifts the subject from the flowers image, optionally at [point].
  ///
  /// When [useSep] is true the result goes into [sepImages]; otherwise it
  /// is appended to [images].
  void onTouch(bool useSep) {
    final source = flowers;
    if (source == null) return; // asset not loaded yet (was `flowers!`)
    visionController
        .processImage(
      LiftedSubjectsData(
        image: source,
        imageSize: const Size(600, 400),
        crop: useSep,
        touchPoint: point,
      ),
    )
        .then((value) {
      if (value != null) {
        if (useSep) {
          sepImages.add(value);
        } else {
          images.add(value);
        }
        setState(() {});
      }
    });
  }

  /// Builds the image widgets: every processed image, with the last one
  /// wrapped in a GestureDetector so taps trigger a touch-point lift.
  List<Widget> showImages() {
    List<Widget> widgets = [];
    for (int i = 0; i < images.length; i++) {
      if (i == images.length - 1 && images[i] != null) {
        // Fixed display size so the tap position can be normalized to 0..1.
        double w = 600;
        double h = 400;
        widgets.add(
          SizedBox(
            width: w,
            height: h,
            child: GestureDetector(
              onTapDown: (td) {
                point = Point(
                  td.localPosition.dx / w,
                  td.localPosition.dy / h,
                );
                sepImages = []; // keep only the newest tap result
                onTouch(true);
              },
              child: Image.memory(
                images[i]!,
                fit: BoxFit.fitHeight,
              ),
            ),
          ),
        );
      } else if (images[i] != null) {
        widgets.add(
          Image.memory(
            images[i]!,
            fit: BoxFit.fitHeight,
          ),
        );
      }
    }
    for (int i = 0; i < sepImages.length; i++) {
      if (sepImages[i] != null) {
        widgets.add(
          Image.memory(
            sepImages[i]!,
            fit: BoxFit.fitHeight,
          ),
        );
      }
    }
    return widgets;
  }

  @override
  Widget build(BuildContext context) {
    deviceWidth = MediaQuery.of(context).size.width;
    deviceHeight = MediaQuery.of(context).size.height;
    return ListView(
      children: [
        Wrap(
          children: showImages(),
        ),
      ],
    );
  }
}
更多关于Flutter苹果设备视觉提升主体检测插件apple_vision_lift_subjects的使用的实战系列教程也可以访问 https://www.itying.com/category-92-b0.html
当然,下面是一个关于如何在Flutter中使用apple_vision_lift_subjects
插件来提升苹果设备上视觉主体检测功能的代码示例。这个插件允许你利用Apple Vision框架来检测图像中的主要视觉主体。
首先,确保你已经在pubspec.yaml
文件中添加了apple_vision_lift_subjects
依赖:
dependencies:
flutter:
sdk: flutter
apple_vision_lift_subjects: ^最新版本号 # 请替换为实际的最新版本号
然后,运行flutter pub get
来安装依赖。
接下来,在你的Flutter应用中,你可以使用以下代码来检测图像中的主要视觉主体:
import 'package:flutter/material.dart';
import 'package:apple_vision_lift_subjects/apple_vision_lift_subjects.dart';
import 'dart:ui' as ui;
import 'dart:typed_data';
/// Entry point: boot the detection demo.
void main() => runApp(MyApp());
/// Application shell hosting the vision demo page.
class MyApp extends StatelessWidget {
  @override
  Widget build(BuildContext context) => MaterialApp(
        home: VisionDemo(),
      );
}
/// Stateful page that lets the user pick a photo and overlays the
/// detected subject rectangles on screen.
class VisionDemo extends StatefulWidget {
@override
_VisionDemoState createState() => _VisionDemoState();
}
/// State for [VisionDemo]: picks an image from the gallery, runs subject
/// detection on it, and draws a red border around each detected subject.
///
/// NOTE(review): this snippet references `ImagePicker`/`ImageSource` and
/// `File`, but the example imports neither
/// `package:image_picker/image_picker.dart` nor `dart:io` — both must be
/// added before this compiles. Detection itself goes through the
/// placeholder `detectLiftSubjects`, so no boxes will appear until that is
/// wired to the plugin's real API.
class _VisionDemoState extends State<VisionDemo> {
// Bounding boxes of detected subjects; presumably in image-pixel
// coordinates — TODO confirm against the plugin's output.
List<Rect> _subjects = [];
// Raw bytes of the currently selected image (null until one is picked).
Uint8List? _imageBytes;
@override
Widget build(BuildContext context) {
return Scaffold(
appBar: AppBar(
title: Text('Apple Vision Lift Subjects Demo'),
),
body: Column(
children: [
_buildImage(),
SizedBox(height: 20),
_buildSubjectOverlays(),
],
),
floatingActionButton: FloatingActionButton(
onPressed: _pickImage,
tooltip: 'Pick Image',
child: Icon(Icons.add_a_photo),
),
);
}
// Shows the picked image, or a placeholder message when none is selected.
Widget _buildImage() {
if (_imageBytes == null) {
return Center(child: Text('No image selected.'));
}
return Image.memory(_imageBytes!);
}
// Draws one red-bordered box per detected subject.
// NOTE(review): the Stack is laid out below the image in the Column, so
// these boxes are not actually positioned over the image — verify intent.
Widget _buildSubjectOverlays() {
if (_subjects.isEmpty) {
return Container();
}
return Stack(
children: _subjects.map((rect) {
return Positioned(
left: rect.left,
top: rect.top,
width: rect.width,
height: rect.height,
child: Container(
decoration: BoxDecoration(
border: Border.all(color: Colors.red, width: 2),
),
),
);
}).toList(),
);
}
// Opens the gallery picker, reads the chosen file into memory, and
// triggers detection. Requires image_picker and dart:io imports (see
// class note) plus photo-library permission in Info.plist.
Future<void> _pickImage() async {
final pickedFile = await ImagePicker().pickImage(source: ImageSource.gallery);
if (pickedFile != null) {
final File file = File(pickedFile.path);
_imageBytes = await file.readAsBytes();
_detectSubjects();
}
}
// Decodes the picked bytes, re-encodes the first frame as PNG, and feeds
// it to detectLiftSubjects; results are converted to Flutter Rects.
Future<void> _detectSubjects() async {
if (_imageBytes == null) return;
final ui.Codec codec = await ui.instantiateImageCodec(_imageBytes!);
final ui.FrameInfo frameInfo = await codec.getNextFrame();
final ui.Image image = frameInfo.image;
final ByteData? byteData = await image.toByteData(format: ui.ImageByteFormat.png);
if (byteData == null) return;
final Uint8List pngBytes = byteData.buffer.asUint8List();
final List<VisionRect> subjects = await detectLiftSubjects(pngBytes);
setState(() {
_subjects = subjects.map((visionRect) => Rect.fromLTWH(
visionRect.x,
visionRect.y,
visionRect.width,
visionRect.height,
)).toList();
});
}
}
/// Detects lifted subjects in [imageBytes] and returns their bounding boxes.
///
/// This is a placeholder: the real implementation depends on the
/// apple_vision_lift_subjects plugin's actual API, roughly along the
/// lines of:
///
///     final result = await AppleVisionLiftSubjects.detect(imageBytes);
///     return result
///         .map((s) => VisionRect(s.x, s.y, s.width, s.height))
///         .toList();
///
/// Consult the plugin's official documentation and replace the stub
/// below accordingly; until then it always returns an empty list.
Future<List<VisionRect>> detectLiftSubjects(Uint8List imageBytes) async {
  return <VisionRect>[];
}
/// A simple immutable value type describing a detected subject's bounding
/// box: top-left corner ([x], [y]) plus [width] and [height].
class VisionRect {
  final double x;
  final double y;
  final double width;
  final double height;

  /// All fields are final, so the constructor is `const`, allowing
  /// compile-time canonicalized instances.
  const VisionRect(this.x, this.y, this.width, this.height);
}
注意:
apple_vision_lift_subjects
插件可能并没有直接提供一个名为detectLiftSubjects
的方法。你需要参考该插件的实际API文档来实现主体检测功能。
- 由于这个插件是特定于iOS的,因此你需要在iOS平台上运行这段代码。
- 你可能需要在
Info.plist
文件中添加一些权限请求,以允许应用访问照片库。
确保你已经按照插件的文档正确配置了iOS项目,并且已经满足了所有必要的权限要求。