Using the Flutter Camera with the Google ML Vision Plugin camera_google_ml_vision
camera_google_ml_vision
Version: 1.1.0-nullsafety
Description: camera_google_ml_vision is a Flutter package for iOS and Android that runs Google ML Vision detection on a live camera preview. It is the continuation of the original flutter_camera_ml_vision package (also at version 1.1.0-nullsafety). This example uses camera_google_ml_vision to show how to add camera functionality to a Flutter app and detect barcodes with Google ML Vision.
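The heart of the package is the CameraMlVision widget, which renders the preview and feeds each frame to an ML Vision detector. A minimal sketch of its use, matching the full barcode example below:

CameraMlVision<List<Barcode>>(
  // Detector callback: here, the barcode detector from google_ml_vision.
  detector: GoogleVision.instance.barcodeDetector().detectInImage,
  onResult: (List<Barcode> barcodes) {
    // Handle the barcodes found in the current frame.
  },
)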
Installation
First, add camera_google_ml_vision as a dependency in your pubspec.yaml file:
dependencies:
  flutter:
    sdk: flutter
  camera_google_ml_vision: 1.1.0-nullsafety
Then run flutter pub get to install it.
Android
In your android/app/build.gradle file, set the minimum SDK version to 21 or higher:
minSdkVersion 21
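For orientation, this setting lives in the defaultConfig block of the Flutter template's android/app/build.gradle (the surrounding structure shown here is the template default):

android {
    defaultConfig {
        // camera_google_ml_vision needs API level 21 or higher
        minSdkVersion 21
    }
}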
Usage
Example: barcode detection
The complete example below shows how to use camera_google_ml_vision in a Flutter app to scan barcodes.
import 'dart:ui';

import 'package:camera_google_ml_vision/camera_google_ml_vision.dart';
import 'package:flutter/material.dart';
import 'package:google_ml_vision/google_ml_vision.dart';

void main() => runApp(MyApp());

class MyApp extends StatelessWidget {
  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      title: 'Flutter Demo',
      theme: ThemeData(
        primarySwatch: Colors.blue,
      ),
      home: MyHomePage(title: 'Flutter Demo Home Page'),
    );
  }
}

class MyHomePage extends StatefulWidget {
  MyHomePage({Key? key, required this.title}) : super(key: key);

  final String title;

  @override
  _MyHomePageState createState() => _MyHomePageState();
}

class _MyHomePageState extends State<MyHomePage> {
  // Display values of the barcodes scanned so far.
  List<String> data = [];

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: Text(widget.title),
      ),
      body: Column(
        mainAxisSize: MainAxisSize.min,
        crossAxisAlignment: CrossAxisAlignment.center,
        children: [
          ElevatedButton(
            onPressed: () async {
              // Open the scan page and wait for it to pop with a barcode.
              final barcode = await Navigator.of(context).push<Barcode>(
                MaterialPageRoute(
                  builder: (c) {
                    return ScanPage();
                  },
                ),
              );
              if (barcode == null) {
                return;
              }
              setState(() {
                data.add(barcode.displayValue ?? '');
              });
            },
            child: Text('Scan product'),
          ),
          Expanded(
            child: ListView(
              children: data.map((d) => Text(d)).toList(),
            ),
          ),
        ],
      ),
    );
  }
}

class ScanPage extends StatefulWidget {
  @override
  _ScanPageState createState() => _ScanPageState();
}

class _ScanPageState extends State<ScanPage> {
  // Guards against popping the page more than once.
  bool resultSent = false;
  BarcodeDetector detector = GoogleVision.instance.barcodeDetector();

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      body: SafeArea(
        child: SizedBox(
          width: MediaQuery.of(context).size.width,
          child: CameraMlVision<List<Barcode>>(
            // Draws the scanner frame on top of the camera preview.
            overlayBuilder: (c) {
              return Container(
                decoration: ShapeDecoration(
                  shape: _ScannerOverlayShape(
                    borderColor: Theme.of(context).primaryColor,
                    borderWidth: 3.0,
                  ),
                ),
              );
            },
            detector: detector.detectInImage,
            onResult: (List<Barcode> barcodes) {
              if (!mounted || resultSent || barcodes.isEmpty) {
                return;
              }
              resultSent = true;
              Navigator.of(context).pop<Barcode>(barcodes.first);
            },
            onDispose: () {
              detector.close();
            },
          ),
        ),
      ),
    );
  }
}
class _ScannerOverlayShape extends ShapeBorder {
  final Color borderColor;
  final double borderWidth;
  final Color overlayColor;

  _ScannerOverlayShape({
    this.borderColor = Colors.white,
    this.borderWidth = 1.0,
    this.overlayColor = const Color(0x88000000),
  });

  @override
  EdgeInsetsGeometry get dimensions => EdgeInsets.all(10.0);

  @override
  Path getInnerPath(Rect rect, {TextDirection? textDirection}) {
    return Path()
      ..fillType = PathFillType.evenOdd
      ..addPath(getOuterPath(rect), Offset.zero);
  }

  @override
  Path getOuterPath(Rect rect, {TextDirection? textDirection}) {
    Path _getLeftTopPath(Rect rect) {
      return Path()
        ..moveTo(rect.left, rect.bottom)
        ..lineTo(rect.left, rect.top)
        ..lineTo(rect.right, rect.top);
    }

    return _getLeftTopPath(rect)
      ..lineTo(
        rect.right,
        rect.bottom,
      )
      ..lineTo(
        rect.left,
        rect.bottom,
      )
      ..lineTo(
        rect.left,
        rect.top,
      );
  }

  @override
  void paint(Canvas canvas, Rect rect, {TextDirection? textDirection}) {
    // Length of each corner marker of the scan window.
    const lineSize = 30;
    final width = rect.width;
    final borderWidthSize = width * 10 / 100;
    final height = rect.height;
    final borderHeightSize = height - (width - borderWidthSize);
    final borderSize = Size(borderWidthSize / 2, borderHeightSize / 2);

    // Dim everything outside the central scan window.
    var paint = Paint()
      ..color = overlayColor
      ..style = PaintingStyle.fill;
    canvas
      ..drawRect(
        Rect.fromLTRB(
            rect.left, rect.top, rect.right, borderSize.height + rect.top),
        paint,
      )
      ..drawRect(
        Rect.fromLTRB(rect.left, rect.bottom - borderSize.height, rect.right,
            rect.bottom),
        paint,
      )
      ..drawRect(
        Rect.fromLTRB(rect.left, rect.top + borderSize.height,
            rect.left + borderSize.width, rect.bottom - borderSize.height),
        paint,
      )
      ..drawRect(
        Rect.fromLTRB(
            rect.right - borderSize.width,
            rect.top + borderSize.height,
            rect.right,
            rect.bottom - borderSize.height),
        paint,
      );

    paint = Paint()
      ..color = borderColor
      ..style = PaintingStyle.stroke
      ..strokeWidth = borderWidth;
    final borderOffset = borderWidth / 2;
    final realRect = Rect.fromLTRB(
        borderSize.width + borderOffset,
        borderSize.height + borderOffset + rect.top,
        width - borderSize.width - borderOffset,
        height - borderSize.height - borderOffset + rect.top);

    canvas
      // Draw top right corner
      ..drawPath(
          Path()
            ..moveTo(realRect.right, realRect.top)
            ..lineTo(realRect.right, realRect.top + lineSize),
          paint)
      ..drawPath(
          Path()
            ..moveTo(realRect.right, realRect.top)
            ..lineTo(realRect.right - lineSize, realRect.top),
          paint)
      ..drawPoints(
        PointMode.points,
        [Offset(realRect.right, realRect.top)],
        paint,
      )
      // Draw top left corner
      ..drawPath(
          Path()
            ..moveTo(realRect.left, realRect.top)
            ..lineTo(realRect.left, realRect.top + lineSize),
          paint)
      ..drawPath(
          Path()
            ..moveTo(realRect.left, realRect.top)
            ..lineTo(realRect.left + lineSize, realRect.top),
          paint)
      ..drawPoints(
        PointMode.points,
        [Offset(realRect.left, realRect.top)],
        paint,
      )
      // Draw bottom right corner
      ..drawPath(
          Path()
            ..moveTo(realRect.right, realRect.bottom)
            ..lineTo(realRect.right, realRect.bottom - lineSize),
          paint)
      ..drawPath(
          Path()
            ..moveTo(realRect.right, realRect.bottom)
            ..lineTo(realRect.right - lineSize, realRect.bottom),
          paint)
      ..drawPoints(
        PointMode.points,
        [Offset(realRect.right, realRect.bottom)],
        paint,
      )
      // Draw bottom left corner
      ..drawPath(
          Path()
            ..moveTo(realRect.left, realRect.bottom)
            ..lineTo(realRect.left, realRect.bottom - lineSize),
          paint)
      ..drawPath(
          Path()
            ..moveTo(realRect.left, realRect.bottom)
            ..lineTo(realRect.left + lineSize, realRect.bottom),
          paint)
      ..drawPoints(
        PointMode.points,
        [Offset(realRect.left, realRect.bottom)],
        paint,
      );
  }

  @override
  ShapeBorder scale(double t) {
    return _ScannerOverlayShape(
      borderColor: borderColor,
      borderWidth: borderWidth,
      overlayColor: overlayColor,
    );
  }
}
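CameraMlVision is generic over the detector's result type, so other google_ml_vision detectors plug in the same way. A hedged sketch of swapping in live text recognition (assuming textRecognizer().processImage follows the same Future-returning pattern as detectInImage):

// Hypothetical variant: live text recognition instead of barcode scanning.
final textRecognizer = GoogleVision.instance.textRecognizer();

CameraMlVision<VisionText>(
  detector: textRecognizer.processImage,
  onResult: (VisionText text) {
    // text.text holds everything recognized in the current frame.
    debugPrint(text.text);
  },
  onDispose: textRecognizer.close,
)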
More hands-on tutorials about the Flutter camera and the Google ML Vision plugin camera_google_ml_vision are available at https://www.itying.com/category-92-b0.html
Here is an example that combines Flutter's camera plugin with the google_ml_vision plugin to implement a live camera preview with on-device recognition. Note that google_ml_vision does not expose a dedicated object detector; this example uses its image labeler, which identifies what appears in each frame.
First, make sure you have added the necessary dependencies in your pubspec.yaml file:
dependencies:
  flutter:
    sdk: flutter
  camera: ^0.10.0+3 # check for the latest version
  google_ml_vision: ^0.14.0 # check for the latest version
Then run flutter pub get to install the dependencies.
Next, handle the platform permissions and configuration. On Android, make sure the camera permission is declared in AndroidManifest.xml:
<uses-permission android:name="android.permission.CAMERA" />
<uses-feature android:name="android.hardware.camera" android:required="true" />
<uses-feature android:name="android.hardware.camera.autofocus" />
On iOS, add a camera usage description to Info.plist:
<key>NSCameraUsageDescription</key>
<string>Need camera access</string>
Now let's write the Flutter code for the camera preview and live recognition.
import 'package:camera/camera.dart';
import 'package:flutter/foundation.dart';
import 'package:flutter/material.dart';
import 'package:google_ml_vision/google_ml_vision.dart';

List<CameraDescription> cameras = [];

Future<void> main() async {
  WidgetsFlutterBinding.ensureInitialized();
  cameras = await availableCameras();
  runApp(MyApp());
}

class MyApp extends StatelessWidget {
  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      home: CameraApp(),
    );
  }
}

class CameraApp extends StatefulWidget {
  @override
  _CameraAppState createState() => _CameraAppState();
}

class _CameraAppState extends State<CameraApp> {
  CameraController? controller;
  final ImageLabeler labeler = GoogleVision.instance.imageLabeler();
  // Latest labels recognized on the camera stream.
  final ValueNotifier<List<ImageLabel>> labels = ValueNotifier([]);
  bool isDetecting = false;

  @override
  void initState() {
    super.initState();
    controller = CameraController(cameras[0], ResolutionPreset.high,
        enableAudio: false);
    controller!.initialize().then((_) {
      if (!mounted) {
        return;
      }
      setState(() {});
      // Feed camera frames to the labeler, skipping frames that arrive
      // while the previous one is still being processed.
      controller!.startImageStream((CameraImage image) async {
        if (isDetecting) return;
        isDetecting = true;
        try {
          labels.value = await labeler.processImage(_toVisionImage(image));
        } finally {
          isDetecting = false;
        }
      });
    });
  }

  // Converts a CameraImage to a GoogleVisionImage by concatenating its
  // planes and describing their layout in the metadata.
  GoogleVisionImage _toVisionImage(CameraImage image) {
    final WriteBuffer allBytes = WriteBuffer();
    for (final Plane plane in image.planes) {
      allBytes.putUint8List(plane.bytes);
    }
    final bytes = allBytes.done().buffer.asUint8List();

    final metadata = GoogleVisionImageMetadata(
      size: Size(image.width.toDouble(), image.height.toDouble()),
      rawFormat: image.format.raw,
      rotation: ImageRotation.rotation90, // adjust for device orientation
      planeData: image.planes
          .map((Plane plane) => GoogleVisionImagePlaneMetadata(
                bytesPerRow: plane.bytesPerRow,
                height: plane.height,
                width: plane.width,
              ))
          .toList(),
    );
    return GoogleVisionImage.fromBytes(bytes, metadata);
  }

  @override
  void dispose() {
    controller?.dispose();
    labeler.close();
    labels.dispose();
    super.dispose();
  }

  @override
  Widget build(BuildContext context) {
    if (controller == null || !controller!.value.isInitialized) {
      return Container();
    }
    return Scaffold(
      appBar: AppBar(
        title: Text('Camera with ML Vision'),
      ),
      body: Stack(
        children: <Widget>[
          CameraPreview(controller!),
          Positioned.fill(
            // Rebuilds the overlay whenever a new list of labels arrives.
            child: ValueListenableBuilder<List<ImageLabel>>(
              valueListenable: labels,
              builder: (context, value, _) {
                return Container(
                  color: Colors.black45,
                  child: value.isEmpty
                      ? Center(
                          child: Text('No objects detected',
                              style: TextStyle(color: Colors.white)),
                        )
                      : ListView.builder(
                          shrinkWrap: true,
                          itemCount: value.length,
                          itemBuilder: (context, index) {
                            final ImageLabel label = value[index];
                            return Padding(
                              padding: EdgeInsets.all(8.0),
                              child: Text(
                                '${label.text} '
                                '(${((label.confidence ?? 0) * 100).toStringAsFixed(0)}%)',
                                style: TextStyle(color: Colors.white),
                              ),
                            );
                          },
                        ),
                );
              },
            ),
          ),
        ],
      ),
    );
  }
}
Note that this example assumes reasonably recent versions of Flutter and the plugins; if your versions differ, some API calls may need adjusting. It shows how to use the camera plugin for the live preview and the google_ml_vision plugin to recognize each frame, with the results drawn on top of the preview.
Also, make sure a real project handles exceptions and error cases, such as a failed camera initialization or denied permissions.
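As a minimal sketch of that error handling (the error code mentioned in the comment is illustrative; check the camera plugin's CameraException documentation for the exact values):

Future<void> initCamera(CameraController controller) async {
  try {
    await controller.initialize();
  } on CameraException catch (e) {
    // e.code identifies the failure, e.g. when camera access was denied.
    debugPrint('Camera error: ${e.code} ${e.description}');
  }
}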