HarmonyOS NEXT: is this use of worker + camera + emitter reasonable?
Problem scenario:
Because I have many time-consuming tasks, I moved the camera operations (initialization, fetching video frames, releasing camera resources) into a worker thread, CameraWorker. I ran into problems using this worker. My intent was that after initializing the camera through CameraWorker, the main thread would receive every frame through the worker's onmessage callback and pass each frame to my processing interface. However, onmessage only delivers the very first frame after initialization. I then tried to notify the main thread of each frame's buffer from CameraService (the concrete camera code) via emitter, but hit another problem: each frame is 3,112,960 bytes, and the main thread's emitter listener never receives the buffer. There is no error, and debugging shows nothing obviously wrong. The full code follows:
// CameraService
import { photoAccessHelper } from '@kit.MediaLibraryKit';
import { fileIo, WriteOptions } from '@kit.CoreFileKit';
import { BusinessError, Callback } from '@kit.BasicServicesKit';
import { camera } from '@kit.CameraKit';
import { common } from '@kit.AbilityKit';
import { image } from '@kit.ImageKit';
import { emitter } from '@kit.BasicServicesKit';
// Dual-stream preview: the app can use two preview streams at once, one rendered on screen and one for image processing or other work, to improve processing efficiency.
const TAG: string = '[CameraService]';
type ImageCallback = (buffer: ArrayBuffer, width: number, height: number) => void;
interface FrameData {
buffer: ArrayBuffer | null;
width: number;
height: number;
}
class CameraService {
private cameraManager?: camera.CameraManager
private cameraInput?: camera.CameraInput
public previewOutput?: camera.PreviewOutput
public previewOutput2?: camera.PreviewOutput
public photoOutput?: camera.PhotoOutput
private photoSession?: camera.PhotoSession
private receiver: image.ImageReceiver | undefined = undefined;
private count: number = 0
private onImageCallback?: ImageCallback;
private frameBuffer: ArrayBuffer | null = null;
private frameWidth: number = 0;
private frameHeight: number = 0;
/**
* Receive frame data.
* @param buffer ArrayBuffer holding the frame data
* @param width frame width
* @param height frame height
*/
public getFrameData(buffer: ArrayBuffer, width: number, height: number): void {
this.frameBuffer = buffer;
this.frameWidth = width;
this.frameHeight = height;
}
/**
* Fetch the cached frame data.
* @returns the latest frame's buffer, width and height
*/
public fetchFrameData(): FrameData {
return {
buffer: this.frameBuffer,
width: this.frameWidth,
height: this.frameHeight,
};
}
async initCamera(context: Context, XComponentSurfaceId: string): Promise<void> {
// Get the camera manager instance
this.cameraManager = camera.getCameraManager(context);
// Enumerate all supported cameras
let cameraArray: Array<camera.CameraDevice> = this.cameraManager.getSupportedCameras();
console.info(TAG, "cameraArray:" + cameraArray[0].cameraId, cameraArray[1].cameraId, cameraArray.length)
// Select the front camera. cameraArray[0]: back, cameraArray[1]: front
let cameraDevice = cameraArray[1];
// Create the CameraInput instance
this.cameraInput = this.cameraManager.createCameraInput(cameraDevice);
if (this.cameraInput === undefined) {
console.error('cameraInput is undefined');
return;
}
// Open the camera
await this.cameraInput.open();
// Get the scene modes supported by this camera device
let modes: Array<camera.SceneMode> = this.cameraManager.getSupportedSceneModes(cameraDevice);
// Get all output capabilities supported by this camera device
let cameraOutputCap: camera.CameraOutputCapability =
this.cameraManager!.getSupportedOutputCapability(cameraDevice, modes[0]);
// Preview profiles supported by the device
let previewProfilesArray: Array<camera.Profile> = cameraOutputCap.previewProfiles;
// Photo profiles supported by the device
// let photoProfilesArray: Array<camera.Profile> = cameraOutputCap.photoProfiles;
// Index of the preview profile with the requested size
let position: number = 0;
if (previewProfilesArray != null) {
previewProfilesArray.forEach((value: camera.Profile, index: number) => {
// Log the supported preview sizes
console.info(TAG,
`Supported preview size: [${index},${value.size.width},${value.size.height},${value.size.width / value.size.height}]`);
if (value.size.width === 1920 && value.size.height === 1080) {
position = index;
}
})
}
// Preview stream 1
let previewProfilesObj: camera.Profile = previewProfilesArray[position];
// Preview stream 2
let previewProfilesObj2: camera.Profile = previewProfilesArray[position];
// Create preview output stream 1
this.previewOutput = this.cameraManager!.createPreviewOutput(previewProfilesObj, XComponentSurfaceId);
let size: image.Size = {
height: previewProfilesArray[position].size.height,
width: previewProfilesArray[position].size.width
};
this.receiver = image.createImageReceiver(size, image.ImageFormat.JPEG, 8);
await this.onImageArrival(size, this.receiver)
// Create the output object for preview stream 2
let imageReceiverSurfaceId: string = await this.receiver.getReceivingSurfaceId();
this.previewOutput2 = this.cameraManager!.createPreviewOutput(previewProfilesObj2, imageReceiverSurfaceId);
// Create the photo output stream
// this.photoOutput = this.cameraManager!.createPhotoOutput(previewProfilesArray[position]);
// // Register a listener for full-quality photo reporting (async callback).
// this.photoOutput.on('photoAvailable', (errCode: BusinessError, photo: camera.Photo): void => {
// let imageObj = photo.main;
// // Get the component buffer by component type; the result is returned via callback.
// imageObj.getComponent(image.ComponentType.JPEG, async (errCode: BusinessError, component: image.Component) => {
// if (errCode || component === undefined) {
// return;
// }
// let buffer: ArrayBuffer;
// buffer = component.byteBuffer;
// console.info(JSON.stringify(buffer));
// await this.savePicture(buffer);
// imageObj.release();
// })
// })
// Create the session
this.photoSession = this.cameraManager!.createSession(camera.SceneMode.NORMAL_PHOTO);
// Begin session configuration
this.photoSession.beginConfig();
// Add the camera input to the session
this.photoSession.addInput(this.cameraInput);
// Add preview stream 1 to the session
this.photoSession.addOutput(this.previewOutput);
// Add preview stream 2 to the session
this.photoSession.addOutput(this.previewOutput2);
// Commit the session configuration
await this.photoSession.commitConfig();
// Start the session
await this.photoSession.start();
this.photoSession.on('error', (error: BusinessError) => {
console.error(`Photo session error code: ${error.code}`);
});
}
// Register the callback invoked when a new image arrives
setImageCallback(callback: ImageCallback) {
this.onImageCallback = callback;
}
/**
* Process arriving images and publish each frame's byteBuffer.
* @param size image size
* @param receiver the image receiver
*/
async onImageArrival(size: image.Size, receiver: image.ImageReceiver): Promise<void> {
// Register the callback for incoming images
receiver.on('imageArrival', () => {
console.info("imageArrival")
// Read the next image from the ImageReceiver; the result is returned via callback
receiver.readNextImage((err: BusinessError, nextImage: image.Image) => {
if (err || nextImage === undefined) {
console.error('readNextImage failed');
return;
}
// Get the component buffer by component type; the result is returned via callback
nextImage.getComponent(image.ComponentType.JPEG, (err: BusinessError, imgComponent: image.Component) => {
if (err || imgComponent === undefined) {
console.error('getComponent failed');
nextImage.release();
return;
}
// image.Component describes an image color component.
if (imgComponent && imgComponent.byteBuffer as ArrayBuffer) {
let eventData: emitter.EventData = {
data: {
"buffer": imgComponent.byteBuffer,
"width": size.width,
"height": size.height,
}
};
emitter.emit("getFrame", eventData);
}
nextImage.release();
})
})
})
}
// Release camera resources
async releaseCamera(): Promise<void> {
// Close the camera and tear down the session.
console.info(TAG, 'releaseCamera is called');
try {
await this.photoSession?.stop();
await this.cameraInput?.close();
await this.previewOutput?.release();
await this.previewOutput2?.release();
await this.photoOutput?.release();
await this.photoSession?.release();
await this.receiver?.release()
} catch (error) {
let err = error as BusinessError;
console.error(TAG,`releaseCamera fail: error: ${JSON.stringify(err)}`);
} finally {
this.previewOutput = undefined;
this.previewOutput2 = undefined;
this.photoOutput = undefined;
this.cameraManager = undefined;
this.photoSession = undefined;
this.cameraInput = undefined;
this.receiver = undefined;
}
console.info(TAG,'releaseCamera success');
}
}
export default CameraService;
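One hedged adjustment worth trying, since the payload here is about 3 MB per frame: instead of calling emitter.emit inside onImageArrival, hand each frame to the ImageCallback that setImageCallback registers, and let the owner of the CameraService instance (here, the worker) decide how to forward it. A minimal sketch of the replacement receiver.on handler body, using only members that already exist in the class above:
// Sketch: inside onImageArrival, deliver frames through the registered callback instead of emitter
receiver.on('imageArrival', () => {
  receiver.readNextImage((err: BusinessError, nextImage: image.Image) => {
    if (err || nextImage === undefined) {
      console.error('readNextImage failed');
      return;
    }
    nextImage.getComponent(image.ComponentType.JPEG, (err2: BusinessError, imgComponent: image.Component) => {
      if (!err2 && imgComponent && imgComponent.byteBuffer) {
        // Invoke whatever callback was registered via setImageCallback (e.g. by CameraWorker)
        this.onImageCallback?.(imgComponent.byteBuffer as ArrayBuffer, size.width, size.height);
      }
      nextImage.release();
    })
  })
})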
// CameraWorker
import { ErrorEvent, MessageEvents, ThreadWorkerGlobalScope, worker } from '@kit.ArkTS';
import CameraService from '../viewmodel/CameraService';
const workerPort: ThreadWorkerGlobalScope = worker.workerPort;
// Custom message format
interface MessageInfo {
hasResolve: boolean;
type: string;
context: Context; // Note: getContext() cannot be used inside the worker thread to obtain the host thread's context; it must be passed over in a message from the host thread.
surfaceId: string;
}
// One CameraService instance at module scope: creating a new instance inside onmessage
// would make releaseCamera operate on a different object than the one initCamera used.
const cameraService = new CameraService();
workerPort.onmessage = async (e: MessageEvents) => {
const messageInfo: MessageInfo = e.data;
console.info(`worker onmessage type:${messageInfo.type}`)
if ('initCamera' === messageInfo.type) {
// The worker thread received the host thread's message to initialize the camera
console.info(`worker initCamera surfaceId:${messageInfo.surfaceId}`)
// Initialize the camera in the worker thread
await cameraService.initCamera(messageInfo.context, messageInfo.surfaceId);
// Notify the host thread once initialization completes
workerPort.postMessage({ type: 'initCameraDone' });
workerPort.postMessage('hhhhhhhh')
} else if ('releaseCamera' === messageInfo.type) {
// The worker thread received the host thread's message to release the camera
console.info('worker releaseCamera.');
// Release the camera in the worker thread
await cameraService.releaseCamera();
}
}
workerPort.onmessageerror = (e: MessageEvents) => {
}
workerPort.onerror = (e: ErrorEvent) => {
}
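A likely reason the host's onmessage fires only once: in the worker above, postMessage is called only after initCamera completes, so no per-frame message is ever posted. Below is a minimal sketch of a CameraWorker that forwards every frame to the host thread, assuming CameraService invokes the registered ImageCallback per frame (as sketched after the CameraService code). The two-argument postMessage(data, transferList) overload transfers the ArrayBuffer instead of copying roughly 3 MB per frame:
// Sketch: forward each camera frame from the worker to the host thread
const cameraService = new CameraService(); // single module-scope instance for init and release
workerPort.onmessage = async (e: MessageEvents) => {
  const messageInfo: MessageInfo = e.data;
  if ('initCamera' === messageInfo.type) {
    // Register the per-frame callback BEFORE the camera starts producing frames
    cameraService.setImageCallback((buffer: ArrayBuffer, width: number, height: number) => {
      // The transfer list moves the buffer to the host thread without a copy; if the
      // component buffer turns out not to be transferable, post a copy via buffer.slice(0)
      workerPort.postMessage({ type: 'frame', buffer: buffer, width: width, height: height }, [buffer]);
    });
    await cameraService.initCamera(messageInfo.context, messageInfo.surfaceId);
    workerPort.postMessage({ type: 'initCameraDone' });
  } else if ('releaseCamera' === messageInfo.type) {
    await cameraService.releaseCamera();
  }
}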
// Main thread (host)
import CameraHelper from '../utils/CameraHelper'
import grantPermission from '../utils/PermissionUtils'
import { BusinessError, emitter } from '@kit.BasicServicesKit';
import { display } from '@kit.ArkUI';
import { ASF_DetectMode, ASF_DetectModel, ASF_OrientPriority, multiFaceInfo } from './arkCommDef';
import { camera } from '@kit.CameraKit';
import { image } from '@kit.ImageKit';
import { WriteOptions } from '@kit.CoreFileKit';
import { Rect } from '@kit.ArkGraphics3D';
import { FaceHelper } from '../utils/FaceHelper';
import { DrawHelper } from '../utils/DrawHelper';
import * as arcEnumManager from "arcsoft_face"
import { worker, MessageEvents, ErrorEvent } from '@kit.ArkTS';
import {
FaceEngine,
ImageFormat,
ErrorInfo,
SingleFaceInfo,
FaceFeature,
LivenessInfo,
AgeInfo,
GenderInfo,
FaceInfo,
CombinedMask,
DetectMode,
OrientPriority
} from 'arcsoft_face';
import { FTInfo } from '../interfaces/ftInfo';
import CameraService from '../viewmodel/CameraService';
@Builder
export function RecognizeAndRegisterBuilder() {
RecognizeAndRegister()
}
const TAG = "[RecognizeAndRegister]"
@Component
struct RecognizeAndRegister {
private controller: XComponentController = new XComponentController()
private surfaceId: string = '';
private screenWidth: number = 0;
private screenHeight: number = 0;
private surfaceWidth: number = 0;
private surfaceHeight: number = 0;
private isInit: number = -1;
private UIContext = getContext();
@State currentBuffer: ArrayBuffer = new ArrayBuffer(0)
@State cachedWidth: number = 0;
@State registerStatus: number = 0;
// Settings for the CanvasRenderingContext2D object, e.g. anti-aliasing; true enables anti-aliasing.
private settings: RenderingContextSettings = new RenderingContextSettings(true)
// Creates the CanvasRenderingContext2D object used for drawing inside the Canvas.
private context: CanvasRenderingContext2D = new CanvasRenderingContext2D(this.settings)
// Create a ThreadWorker object to obtain the worker instance
private cameraWorker: worker.ThreadWorker =
new worker.ThreadWorker('entry/ets/workers/CameraWorker.ets');
// private faceHelper = new FaceHelper();
private drawHelper = new DrawHelper(this.context);
private faceEngine = new FaceEngine()
async aboutToAppear() {
this.screenWidth = display.getDefaultDisplaySync().width
this.surfaceWidth = this.screenWidth;
this.surfaceHeight = this.screenWidth / 9 * 16
}
onPageShow(): void {
if ('' !== this.surfaceId) {
// Send a message through the worker instance to have the worker thread initialize the camera
this.cameraWorker.postMessage({
type: 'initCamera',
context: getContext(this),
surfaceId: this.surfaceId,
})
}
}
onPageHide() {
// Send a message through the worker instance to have the worker thread release the camera
this.cameraWorker.postMessage({
type: 'releaseCamera',
})
}
async XComponentInit() {
let surfaceRect: SurfaceRect = {
surfaceWidth: this.screenWidth,
surfaceHeight: this.screenWidth / 9 * 16
};
this.controller.setXComponentSurfaceRect(surfaceRect);
this.surfaceId = this.controller.getXComponentSurfaceId();
// await this.cameraHelper.initCamera(this.surfaceId);
if (!this.cameraWorker) {
console.error('create stage worker failed');
return;
}
// The host thread sends the camera-initialization message to the worker thread
this.cameraWorker.postMessage({
type: 'initCamera',
context: getContext(this), // Pass the host thread's context to the worker thread
surfaceId: this.surfaceId, // Pass the surfaceId to the worker thread
})
// Set the host thread's onmessage callback to receive frame data sent by the worker.
// Note: only one handler can be assigned; a second assignment silently overwrites the first.
this.cameraWorker.onmessage = (e: MessageEvents): void => {
const frameData: FrameData = e.data;
console.info("worker:: received frame data " + JSON.stringify(frameData));
// Destroy the worker object when done
// this.cameraWorker.terminate();
};
this.cameraWorker.onerror = (err: ErrorEvent) => {
console.log("main error message " + err.message);
}
// Register the emitter event listener
emitter.on('getFrame', (data) => {
console.info('getFrame', JSON.stringify(data))
if (data && data.data) {
// console.info("emitter", "getFrame ", data.data.width, data.data.height, data.data.buffer.byteLength);
}
})
}
// Builder that draws the face-box overlay
@Builder
OverlayNode() {
Column() {
Flex({
direction: FlexDirection.Column,
alignItems: ItemAlign.Center,
justifyContent: FlexAlign.Center
}) {
// Use the CanvasRenderingContext2D object inside the Canvas.
Canvas(this.context)
.width('100%')
.height('100%')
.backgroundColor('transparent')
.onReady(() => {
})
}
}.width(this.screenWidth).height(this.screenWidth / 9 * 16).alignItems(HorizontalAlign.Center)
}
build() {
NavDestination() {
Stack({ alignContent: Alignment.BottomEnd }) {
Column() {
Row() {
XComponent({ type: XComponentType.SURFACE, controller: this.controller })
.onLoad(async () => {
await this.XComponentInit()
})
.overlay(this.OverlayNode(), { align: Alignment.Center })
}.width('100%').flexGrow(1)
.aspectRatio(9 / 16)
}.width('100%').height('100%').backgroundColor(Color.Black)
}
}.title('Face Recognition')
}
}
In HarmonyOS NEXT, combining Worker, Camera, and Emitter is reasonable, but their cooperation has to respect the system architecture and performance requirements.
- Worker: executes time-consuming work on a background thread so the main thread is not blocked. For Camera-related work, tasks such as image processing and data encoding belong in the Worker to keep the app responsive.
- Camera: handles image capture and processing. In HarmonyOS, camera operations typically cover preview, photo capture, and recording. For a smooth user experience, it is advisable to keep Camera initialization, configuration, and release on the main thread and hand data-processing tasks to the Worker.
- Emitter: implements the event-driven mechanism, usually for communication between modules. In the cooperation between Camera and Worker, Emitter can deliver events such as "capture finished" or "processing finished", decoupling the modules while keeping communication efficient.
Overall, the Worker + Camera + Emitter combination is reasonable on HarmonyOS, but thread management, resource release, and event delivery all need attention to keep the system stable and performant. One concrete split of the frame path is sketched below.
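For the frame path in the question specifically, one hedged way to apply this split: bulk frame buffers travel through the worker's postMessage and arrive in a single host-side onmessage handler, while emitter is reserved for small control events. Whether emitter can carry a multi-megabyte buffer at all is worth checking against the current EventData limits in the documentation; a 3,112,960-byte payload arriving empty with no error is consistent with a size cap. A host-side sketch (the 'frame' message type matches the worker sketch earlier; the processing call is the app's own interface, shown only as a hypothetical placeholder):
// Sketch: one host-side onmessage handler that splits control events from frame data
this.cameraWorker.onmessage = (e: MessageEvents): void => {
  const msg = e.data;
  if (msg && msg.type === 'frame') {
    // Bulk data path: hand the frame straight to the processing interface
    console.info(TAG, `frame ${msg.width}x${msg.height}, ${(msg.buffer as ArrayBuffer).byteLength} bytes`);
    // e.g. this.faceEngine.detectFaces(msg.buffer, msg.width, msg.height) -- hypothetical call
  } else if (msg && msg.type === 'initCameraDone') {
    // Control path: lightweight notifications; these could equally go through emitter
    console.info(TAG, 'camera initialized in worker');
  }
};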