Flutter图像处理插件camera_process的使用
Flutter图像处理插件camera_process的使用
概述
camera_process
插件利用 Google 的 MLKit 实现了从照片或相机中识别人脸和文本的功能。该插件支持 Android 和 iOS 平台,并提供了文本识别和人脸检测功能。
要求
iOS
- 最低 iOS 部署目标:10.0
Android
- minSdkVersion: 21
- targetSdkVersion: 33
特性
| 功能 | Android | iOS |
|---|---|---|
| 文本识别 | ✅ | ✅ |
| 人脸检测 | ✅ | ✅ |
示例代码
以下是一个完整的示例,展示了如何在 Flutter 应用中使用 camera_process
插件进行文本识别和人脸检测。
import 'dart:io';
import 'dart:ui' as ui;
import 'package:flutter/material.dart';
import 'package:image_picker/image_picker.dart';
import 'package:camera/camera.dart';
import 'package:camera_process/camera_process.dart';
List<CameraDescription> cameras = [];
Future<void> main() async {
WidgetsFlutterBinding.ensureInitialized();
cameras = await availableCameras();
runApp(MyApp());
}
class MyApp extends StatelessWidget {
[@override](/user/override)
Widget build(BuildContext context) {
return MaterialApp(
debugShowCheckedModeBanner: false,
home: HomeScreen(),
);
}
}
class HomeScreen extends StatelessWidget {
[@override](/user/override)
Widget build(BuildContext context) {
return Scaffold(
appBar: AppBar(
title: const Text('Camera Process'),
centerTitle: true,
elevation: 0,
),
body: SafeArea(
child: Center(
child: SingleChildScrollView(
child: Padding(
padding: EdgeInsets.symmetric(horizontal: 16),
child: Column(
children: [
ListTile(
tileColor: Theme.of(context).primaryColor,
title: const Text(
'Face Detector',
style: TextStyle(color: Colors.white, fontWeight: FontWeight.bold),
),
onTap: () {
Navigator.push(context, MaterialPageRoute(builder: (context) => FaceDetectorView()));
},
),
ListTile(
tileColor: Theme.of(context).primaryColor,
title: const Text(
'Text Detector',
style: TextStyle(color: Colors.white, fontWeight: FontWeight.bold),
),
onTap: () {
Navigator.push(context, MaterialPageRoute(builder: (context) => TextDetectorView()));
},
),
],
),
),
),
),
),
);
}
}
class TextDetectorView extends StatefulWidget {
[@override](/user/override)
_TextDetectorViewState createState() => _TextDetectorViewState();
}
class _TextDetectorViewState extends State<TextDetectorView> {
TextDetector textDetector = CameraProcess.vision.textDetector();
bool isBusy = false;
CustomPaint? customPaint;
[@override](/user/override)
void dispose() async {
super.dispose();
await textDetector.close();
}
[@override](/user/override)
Widget build(BuildContext context) {
return CameraView(
title: 'Text Detector',
customPaint: customPaint,
onImage: (inputImage) {
processImage(inputImage);
},
);
}
Future<void> processImage(InputImage inputImage) async {
if (isBusy) return;
isBusy = true;
final recognisedText = await textDetector.processImage(inputImage);
print('Found ${recognisedText.blocks.length} textBlocks');
if (inputImage.inputImageData?.size != null && inputImage.inputImageData?.imageRotation != null) {
final painter = TextDetectorPainter(
recognisedText,
inputImage.inputImageData!.size,
inputImage.inputImageData!.imageRotation);
customPaint = CustomPaint(painter: painter);
} else {
customPaint = null;
}
isBusy = false;
if (mounted) {
setState(() {});
}
}
}
class FaceDetectorView extends StatefulWidget {
[@override](/user/override)
_FaceDetectorViewState createState() => _FaceDetectorViewState();
}
class _FaceDetectorViewState extends State<FaceDetectorView> {
FaceDetector faceDetector = CameraProcess.vision.faceDetector(FaceDetectorOptions(enableContours: true, enableClassification: true));
bool isBusy = false;
CustomPaint? customPaint;
[@override](/user/override)
void dispose() {
faceDetector.close();
super.dispose();
}
[@override](/user/override)
Widget build(BuildContext context) {
return CameraView(
title: 'Face Detector',
customPaint: customPaint,
onImage: (inputImage) {
processImage(inputImage);
},
initialDirection: CameraLensDirection.front,
);
}
Future<void> processImage(InputImage inputImage) async {
if (isBusy) return;
isBusy = true;
final faces = await faceDetector.processImage(inputImage);
print('Found ${faces.length} faces');
if (inputImage.inputImageData?.size != null && inputImage.inputImageData?.imageRotation != null) {
final painter = FaceDetectorPainter(faces, inputImage.inputImageData!.size, inputImage.inputImageData!.imageRotation);
customPaint = CustomPaint(painter: painter);
} else {
customPaint = null;
}
isBusy = false;
if (mounted) {
setState(() {});
}
}
}
enum ScreenMode { liveFeed, gallery }
class CameraView extends StatefulWidget {
CameraView({
Key? key,
required this.title,
required this.customPaint,
required this.onImage,
this.initialDirection = CameraLensDirection.back,
}) : super(key: key);
final String title;
final CustomPaint? customPaint;
final Function(InputImage inputImage) onImage;
final CameraLensDirection initialDirection;
[@override](/user/override)
_CameraViewState createState() => _CameraViewState();
}
class _CameraViewState extends State<CameraView> {
ScreenMode _mode = ScreenMode.liveFeed;
CameraController? _controller;
File? _image;
ImagePicker? _imagePicker;
int _cameraIndex = 0;
[@override](/user/override)
void initState() {
super.initState();
_imagePicker = ImagePicker();
for (var i = 0; i < cameras.length; i++) {
if (cameras[i].lensDirection == widget.initialDirection) {
_cameraIndex = i;
}
}
_startLiveFeed();
}
[@override](/user/override)
void dispose() {
_stopLiveFeed();
super.dispose();
}
[@override](/user/override)
Widget build(BuildContext context) {
return Scaffold(
appBar: AppBar(
title: Text(widget.title),
actions: [
Padding(
padding: EdgeInsets.only(right: 20.0),
child: GestureDetector(
onTap: _switchScreenMode,
child: Icon(
_mode == ScreenMode.liveFeed
? Icons.photo_library_outlined
: (Platform.isIOS ? Icons.camera_alt_outlined : Icons.camera),
),
),
),
],
),
body: _body(),
floatingActionButton: _floatingActionButton(),
floatingActionButtonLocation: FloatingActionButtonLocation.centerFloat,
);
}
Widget? _floatingActionButton() {
if (_mode == ScreenMode.gallery) return null;
if (cameras.length == 1) return null;
return Container(
height: 70.0,
width: 70.0,
child: FloatingActionButton(
child: Icon(
Platform.isIOS ? Icons.flip_camera_ios_outlined : Icons.flip_camera_android_outlined,
size: 40,
),
onPressed: _switchLiveCamera,
),
);
}
Widget _body() {
Widget body;
if (_mode == ScreenMode.liveFeed)
body = _liveFeedBody();
else
body = _galleryBody();
return body;
}
Widget _liveFeedBody() {
if (_controller?.value.isInitialized == false) {
return Container();
}
return Container(
color: Colors.black,
child: Stack(
fit: StackFit.expand,
children: <Widget>[
CameraPreview(_controller!),
if (widget.customPaint != null) widget.customPaint!,
],
),
);
}
Widget _galleryBody() {
return ListView(shrinkWrap: true, children: [
_image != null
? Container(
height: 400,
width: 400,
child: Stack(
fit: StackFit.expand,
children: <Widget>[
Image.file(_image!),
if (widget.customPaint != null) widget.customPaint!,
],
),
)
: Icon(Icons.image, size: 200),
Padding(
padding: EdgeInsets.symmetric(horizontal: 16),
child: ElevatedButton(
child: Text('From Gallery'),
onPressed: () => _getImage(ImageSource.gallery),
),
),
Padding(
padding: EdgeInsets.symmetric(horizontal: 16),
child: ElevatedButton(
child: Text('Take a picture'),
onPressed: () => _getImage(ImageSource.camera),
),
),
]);
}
Future _getImage(ImageSource source) async {
final pickedFile = await _imagePicker?.getImage(source: source);
if (pickedFile != null) {
_processPickedFile(pickedFile);
} else {
print('No image selected.');
}
setState(() {});
}
void _switchScreenMode() async {
if (_mode == ScreenMode.liveFeed) {
_mode = ScreenMode.gallery;
await _stopLiveFeed();
} else {
_mode = ScreenMode.liveFeed;
await _startLiveFeed();
}
setState(() {});
}
Future _startLiveFeed() async {
final camera = cameras[_cameraIndex];
_controller = CameraController(camera, ResolutionPreset.low, enableAudio: false);
_controller?.initialize().then((_) {
if (!mounted) {
return;
}
_controller?.startImageStream(_processCameraImage);
setState(() {});
});
}
Future _stopLiveFeed() async {
await _controller?.stopImageStream();
await _controller?.dispose();
_controller = null;
}
Future _switchLiveCamera() async {
if (_cameraIndex == 0)
_cameraIndex = 1;
else
_cameraIndex = 0;
await _stopLiveFeed();
await _startLiveFeed();
}
Future _processPickedFile(PickedFile pickedFile) async {
setState(() {
_image = File(pickedFile.path);
});
final inputImage = InputImage.fromFilePath(pickedFile.path);
widget.onImage(inputImage);
}
Future _processCameraImage(CameraImage image) async {
final WriteBuffer allBytes = WriteBuffer();
for (Plane plane in image.planes) {
allBytes.putUint8List(plane.bytes);
}
final bytes = allBytes.done().buffer.asUint8List();
final Size imageSize = Size(image.width.toDouble(), image.height.toDouble());
final camera = cameras[_cameraIndex];
final imageRotation = InputImageRotationMethods.fromRawValue(camera.sensorOrientation) ?? InputImageRotation.Rotation_0deg;
final inputImageFormat = InputImageFormatMethods.fromRawValue(image.format.raw) ?? InputImageFormat.NV21;
final planeData = image.planes.map((Plane plane) {
return InputImagePlaneMetadata(bytesPerRow: plane.bytesPerRow, height: plane.height, width: plane.width);
}).toList();
final inputImageData = InputImageData(size: imageSize, imageRotation: imageRotation, inputImageFormat: inputImageFormat, planeData: planeData);
final inputImage = InputImage.fromBytes(bytes: bytes, inputImageData: inputImageData);
widget.onImage(inputImage);
}
}
class TextDetectorPainter extends CustomPainter {
TextDetectorPainter(this.recognisedText, this.absoluteImageSize, this.rotation);
final RecognisedText recognisedText;
final Size absoluteImageSize;
final InputImageRotation rotation;
[@override](/user/override)
void paint(Canvas canvas, Size size) {
final Paint paint = Paint()
..style = PaintingStyle.stroke
..strokeWidth = 3.0
..color = Colors.lightGreenAccent;
final Paint background = Paint()..color = Color(0x99000000);
for (final textBlock in recognisedText.blocks) {
final ParagraphBuilder builder = ParagraphBuilder(
ParagraphStyle(textAlign: TextAlign.left, fontSize: 16, textDirection: TextDirection.ltr),
);
builder.pushStyle(ui.TextStyle(color: Colors.lightGreenAccent, background: background));
builder.addText('${textBlock.text}');
builder.pop();
final left = translateX(textBlock.rect.left, rotation, size, absoluteImageSize);
final top = translateY(textBlock.rect.top, rotation, size, absoluteImageSize);
final right = translateX(textBlock.rect.right, rotation, size, absoluteImageSize);
final bottom = translateY(textBlock.rect.bottom, rotation, size, absoluteImageSize);
canvas.drawRect(Rect.fromLTRB(left, top, right, bottom), paint);
canvas.drawParagraph(
builder.build()..layout(ParagraphConstraints(width: right - left)),
Offset(left, top),
);
}
}
[@override](/user/override)
bool shouldRepaint(TextDetectorPainter oldDelegate) {
return oldDelegate.recognisedText != recognisedText;
}
}
class FaceDetectorPainter extends CustomPainter {
FaceDetectorPainter(this.faces, this.absoluteImageSize, this.rotation);
final List<Face> faces;
final Size absoluteImageSize;
final InputImageRotation rotation;
[@override](/user/override)
void paint(Canvas canvas, Size size) {
final Paint paint = Paint()
..style = PaintingStyle.stroke
..strokeWidth = 1.0
..color = Colors.red;
for (final Face face in faces) {
canvas.drawRect(
Rect.fromLTRB(
translateX(face.boundingBox.left, rotation, size, absoluteImageSize),
translateY(face.boundingBox.top, rotation, size, absoluteImageSize),
translateX(face.boundingBox.right, rotation, size, absoluteImageSize),
translateY(face.boundingBox.bottom, rotation, size, absoluteImageSize),
),
paint,
);
void paintContour(FaceContourType type) {
final faceContour = face.getContour(type);
if (faceContour?.positionsList != null) {
for (Offset point in faceContour!.positionsList) {
canvas.drawCircle(
Offset(
translateX(point.dx, rotation, size, absoluteImageSize),
translateY(point.dy, rotation, size, absoluteImageSize),
),
1,
paint);
}
}
}
paintContour(FaceContourType.face);
paintContour(FaceContourType.leftEyebrowTop);
paintContour(FaceContourType.leftEyebrowBottom);
paintContour(FaceContourType.rightEyebrowTop);
paintContour(FaceContourType.rightEyebrowBottom);
paintContour(FaceContourType.leftEye);
paintContour(FaceContourType.rightEye);
paintContour(FaceContourType.upperLipTop);
paintContour(FaceContourType.upperLipBottom);
paintContour(FaceContourType.lowerLipTop);
paintContour(FaceContourType.lowerLipBottom);
paintContour(FaceContourType.noseBridge);
paintContour(FaceContourType.noseBottom);
paintContour(FaceContourType.leftCheek);
paintContour(FaceContourType.rightCheek);
}
}
[@override](/user/override)
bool shouldRepaint(FaceDetectorPainter oldDelegate) {
return oldDelegate.absoluteImageSize != absoluteImageSize || oldDelegate.faces != faces;
}
}
double translateX(double x, InputImageRotation rotation, Size size, Size absoluteImageSize) {
switch (rotation) {
case InputImageRotation.Rotation_90deg:
return x * size.width / (Platform.isIOS ? absoluteImageSize.width : absoluteImageSize.height);
case InputImageRotation.Rotation_270deg:
return size.width - x * size.width / (Platform.isIOS ? absoluteImageSize.width : absoluteImageSize.height);
default:
return x * size.width / absoluteImageSize.width;
}
}
double translateY(double y, InputImageRotation rotation, Size size, Size absoluteImageSize) {
switch (rotation) {
case InputImageRotation.Rotation_90deg:
case InputImageRotation.Rotation_270deg:
return y * size.height / (Platform.isIOS ? absoluteImageSize.height : absoluteImageSize.width);
default:
return y * size.height / absoluteImageSize.height;
}
}
更多关于Flutter图像处理插件camera_process的使用的实战系列教程也可以访问 https://www.itying.com/category-92-b0.html
更多关于Flutter图像处理插件camera_process的使用的实战系列教程也可以访问 https://www.itying.com/category-92-b0.html
当然,以下是一个关于如何在 Flutter 项目中使用 `camera_process` 插件进行图像处理的示例代码。请注意,`camera_process` 插件并非一个官方或广泛认可的插件,所以我将基于一个假设的插件接口来展示。如果 `camera_process` 插件的 API 有所不同,请根据实际情况调整代码。
首先,确保你已经在pubspec.yaml
文件中添加了camera
和假设的camera_process
依赖项:
dependencies:
flutter:
sdk: flutter
camera: ^0.14.0 # 请使用最新版本
camera_process: ^0.1.0 # 假设的版本号,根据实际情况调整
然后,运行flutter pub get
来安装依赖项。
接下来,是示例代码,展示如何使用这些插件来捕获和处理图像:
import 'package:flutter/material.dart';
import 'package:camera/camera.dart';
import 'package:camera_process/camera_process.dart'; // 假设的插件导入
List<CameraDescription> cameras;
CameraController? controller;
void main() {
runApp(MyApp());
}
class MyApp extends StatelessWidget {
@override
Widget build(BuildContext context) {
return MaterialApp(
title: 'Camera Process Demo',
theme: ThemeData(
primarySwatch: Colors.blue,
),
home: MyHomePage(),
);
}
}
class MyHomePage extends StatefulWidget {
@override
_MyHomePageState createState() => _MyHomePageState();
}
class _MyHomePageState extends State<MyHomePage> {
CameraImage? image;
@override
void initState() {
super.initState();
availableCameras().then((availableCameras) {
cameras = availableCameras;
if (cameras.isNotEmpty) {
controller = CameraController(
cameras[0],
ResolutionPreset.medium,
);
controller!.initialize().then((_) {
if (!mounted) {
return;
}
setState(() {});
});
}
}).catchError((err) {
print('Error: ${err.message}');
});
}
@override
void dispose() {
controller?.dispose();
super.dispose();
}
void takePicture() async {
if (!controller!.value.isInitialized) {
return;
}
try {
XFile? file = await controller!.takePicture();
if (file != null) {
// 使用camera_process插件处理图像
// 假设camera_process有一个processImage方法
Uint8List processedImage = await CameraProcess.processImage(file.path);
// 显示或保存处理后的图像
setState(() {
// 这里只是假设processedImage可以直接使用,实际情况可能需要根据processedImage的数据格式做进一步处理
// 例如,如果processedImage是JPEG或PNG格式的字节数组,可以将其显示在Image.memory中
image = CameraImage.fromBytes(
processedImage.lengthInBytes,
processedImage.lengthInBytes,
processedImage,
// 注意:这里的格式和平面数据需要根据实际情况填写
format: CameraImageFormat.jpeg,
planes: [
// 假设只有一个Y平面(实际情况可能不同)
CameraImagePlane(
bytes: processedImage,
bytesPerRow: processedImage.lengthInBytes,
height: // 根据实际情况填写高度
width: // 根据实际情况填写宽度
),
],
);
});
}
} catch (e) {
print(e);
}
}
@override
Widget build(BuildContext context) {
return Scaffold(
appBar: AppBar(
title: Text('Camera Process Demo'),
),
body: Column(
mainAxisAlignment: MainAxisAlignment.center,
children: <Widget>[
controller!.value.isInitialized
? AspectRatio(
aspectRatio: controller!.value.aspectRatio,
child: CameraPreview(controller!),
)
: Container(),
ElevatedButton(
onPressed: takePicture,
child: Text('Take Picture'),
),
if (image != null)
Image.memory(
// 注意:这里直接使用processedImage可能不正确,因为Image.memory需要正确的图像格式
// 这只是一个示例,实际情况需要根据processedImage的格式进行调整
image!.planes[0].bytes!,
width: image!.width,
height: image!.height,
fit: BoxFit.cover,
),
],
),
);
}
}
注意:
- `CameraProcess.processImage` 是一个假设的方法,你需要根据 `camera_process` 插件的实际 API 进行调整。
- `CameraImage.fromBytes` 这样的构造方法通常不用于从文件加载图像,这里只是为了演示如何可能地使用处理后的图像数据。在实际应用中,你可能需要将处理后的图像数据转换为适合 `Image.memory` 或其他 Flutter 图像组件使用的格式。
- 上述代码中的错误处理和资源管理(如释放相机资源)需要根据实际情况进行完善。
由于camera_process
插件并非官方插件,且具体API未知,上述代码仅供参考,并可能需要根据实际情况进行调整。如果camera_process
插件有官方文档或示例代码,请参考那些资源以获取更准确的信息。