HarmonyOS 鸿蒙Next: cannot find an API in the official documentation for playing audio through the earpiece
Our business requirement is to play an audio file through the earpiece, but we cannot find an API for routing playback to the earpiece anywhere in the official documentation. Please advise.
Please refer to the following code. The key point is to set usage in AudioRendererInfo to audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION: streams created with this usage are routed to the earpiece by default.
import audio from '@ohos.multimedia.audio';
import fs from '@ohos.file.fs';
import common from '@ohos.app.ability.common';
const TAG = 'AudioRendererDemo';
export class AudioRendererHelper {
static instance = new AudioRendererHelper()
private context: common.Context | undefined = undefined;
private renderModel: audio.AudioRenderer | undefined = undefined;
private audioStreamInfo: audio.AudioStreamInfo = {
samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
channels: audio.AudioChannel.CHANNEL_1,
sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
};
private audioRendererInfo: audio.AudioRendererInfo = {
usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION, // voice communication: routed to the earpiece by default
rendererFlags: 0
};
private audioRendererOptions: audio.AudioRendererOptions = {
streamInfo: this.audioStreamInfo,
rendererInfo: this.audioRendererInfo
};
// Perform one audio render pass
async start(name: string, context: common.Context) {
this.context = context;
this.renderModel = await audio.createAudioRenderer(this.audioRendererOptions);
if (this.renderModel !== undefined) {
let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
if (stateGroup.indexOf((this.renderModel as audio.AudioRenderer).state.valueOf()) === -1) { // rendering can only start from the prepared, paused, or stopped state
console.error(TAG + ': start failed');
return;
}
await (this.renderModel as audio.AudioRenderer).start(); // start rendering
const bufferSize = await (this.renderModel as audio.AudioRenderer).getBufferSize();
const filePath = context.filesDir + name; // read the file from the app sandbox directory
console.info(TAG + ': playing file ' + filePath);
let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
let stat = await fs.stat(filePath);
let buf = new ArrayBuffer(bufferSize);
let len = Math.ceil(stat.size / bufferSize);
class Options {
offset: number = 0;
length: number = 0
}
for (let i = 0; i < len; i++) {
let options: Options = {
offset: i * bufferSize,
length: bufferSize
};
let readsize = await fs.read(file.fd, buf, options);
// buf holds the audio data to be written. It can be pre-processed before calling AudioRenderer.write() for customized playback; AudioRenderer reads the buffered data and renders it.
let writeSize: number = await (this.renderModel as audio.AudioRenderer).write(buf);
if ((this.renderModel as audio.AudioRenderer).state.valueOf() === audio.AudioState.STATE_RELEASED) { // if the renderer has been released, close the file
fs.close(file);
}
if ((this.renderModel as audio.AudioRenderer).state.valueOf() === audio.AudioState.STATE_RUNNING) {
if (i === len - 1) { // stop rendering once the whole file has been read
fs.close(file);
await (this.renderModel as audio.AudioRenderer).stop();
if ((this.renderModel as audio.AudioRenderer).state.valueOf() === audio.AudioState.STATE_STOPPED) {
console.info('Renderer stopped.');
} else {
console.error('Stopping renderer failed.');
}
}
}
}
}
}
// Stop rendering
async stop() {
if (this.renderModel !== undefined) {
// stopping is only allowed when the renderer is in the running or paused state
if ((this.renderModel as audio.AudioRenderer).state.valueOf() !== audio.AudioState.STATE_RUNNING && (this.renderModel as audio.AudioRenderer).state.valueOf() !== audio.AudioState.STATE_PAUSED) {
console.info('Renderer is not running or paused.');
return;
}
await (this.renderModel as audio.AudioRenderer).stop(); // stop rendering
if ((this.renderModel as audio.AudioRenderer).state.valueOf() === audio.AudioState.STATE_STOPPED) {
console.info('Renderer stopped.');
} else {
console.error('Stopping renderer failed.');
}
}
}
// Destroy the instance and release resources
async release() {
if (this.renderModel !== undefined) {
// release() is only allowed when the renderer is not already in the released state
if (this.renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) {
console.info('Renderer already released');
return;
}
await this.renderModel.release(); // release resources
if (this.renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) {
console.info('Renderer released');
} else {
console.error('Renderer release failed.');
}
}
}
}
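If you also need to switch between the earpiece and the speaker at runtime, the audio framework's routing manager can toggle the active communication device. Below is a minimal sketch, assuming AudioRoutingManager.setCommunicationDevice is available at your target API level (API 9+); setSpeakerActive is a hypothetical helper name:
import audio from '@ohos.multimedia.audio';
// Minimal sketch: toggle the output device for voice-communication streams.
// Assumption: AudioRoutingManager.setCommunicationDevice is supported at the target API level.
export async function setSpeakerActive(active: boolean): Promise<void> {
  const routingManager = audio.getAudioManager().getRoutingManager();
  // active === true routes the communication stream to the speaker;
  // active === false lets it fall back to the earpiece.
  await routingManager.setCommunicationDevice(audio.CommunicationDeviceType.SPEAKER, active);
}
This complements the STREAM_USAGE_VOICE_COMMUNICATION setting above: the usage decides the default route (earpiece), and setCommunicationDevice lets the user flip to the speaker without recreating the renderer.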
// In Index.ets:
import audio from '@ohos.multimedia.audio';
import fs from '@ohos.file.fs';
import { AudioRendererHelper } from '../utils/AudioRendererHelper';
import { AVCastPicker } from '@ohos.multimedia.avCastPicker';
import AVSessionManager from '@ohos.multimedia.avsession';
import { promptAction } from '@kit.ArkUI';
@Entry
@Component
struct Index {
audioRenderer = AudioRendererHelper.instance
private TAG = 'AudioCapturerDemo';
private context = getContext(this);
private audioCapturer: audio.AudioCapturer | undefined = undefined;
private audioStreamInfo: audio.AudioStreamInfo = {
samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, // sampling rate
channels: audio.AudioChannel.CHANNEL_1, // channel count
sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // sample format
encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // encoding format (raw PCM)
}
private audioCapturerInfo: audio.AudioCapturerInfo = {
source: audio.SourceType.SOURCE_TYPE_VOICE_RECOGNITION,
capturerFlags: 0 // audio capturer flags
}
private audioCapturerOptions: audio.AudioCapturerOptions = {
streamInfo: this.audioStreamInfo, // audio stream info
capturerInfo: this.audioCapturerInfo // capturer info
}
@State numBuffersToCapture: boolean = true; // keeps the capture loop running
// Initialize: create the capturer instance and register listeners
async init() {
audio.createAudioCapturer(this.audioCapturerOptions, (err, capturer) => { // create an AudioCapturer instance
if (err) {
console.error(`Invoke createAudioCapturer failed, code is ${err.code}, message is ${err.message}`);
return;
}
console.info(`${this.TAG}: create AudioCapturer success`);
this.audioCapturer = capturer;
if (this.audioCapturer !== undefined) {
(this.audioCapturer as audio.AudioCapturer).on('markReach', 1000, (position: number) => { // subscribe to markReach: fires when the captured frame count reaches 1000
if (position === 1000) {
console.info('ON Triggered successfully-markReach');
}
});
(this.audioCapturer as audio.AudioCapturer).on('periodReach', 2000, (position: number) => { // subscribe to periodReach: fires every 2000 captured frames
if (position === 2000) {
console.info('ON Triggered successfully-periodReach');
}
});
}
this.audioCapturer!.on('stateChange', (state) => {
console.info('AudioCapturerLog: state changed to: ' + state);
switch (state) {
case audio.AudioState.STATE_PREPARED:
case audio.AudioState.STATE_RUNNING:
this.numBuffersToCapture = true; // keep the capture loop running
break;
case audio.AudioState.STATE_STOPPED:
case audio.AudioState.STATE_RELEASED:
default:
this.numBuffersToCapture = false; // stop the capture loop
break;
}
});
});
}
// Start one capture session
async start() {
if (this.audioCapturer !== undefined) {
console.log("1231321321321")
let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
if (stateGroup.indexOf((this.audioCapturer as audio.AudioCapturer).state.valueOf()) === -1) { // capture can only start from the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state
console.error(`${this.TAG}: start failed`);
return;
}
console.log("111111111111111111")
await (this.audioCapturer as audio.AudioCapturer).start(); // 启动采集
console.log("222222222222222222")
const filePath = this.context.filesDir + '/test123.pcm'; // 采集到的音频文件存储路径
console.log("filepath" + filePath)
let file: fs.File = fs.openSync(filePath, fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE); // 如果文件不存在则创建文件
let fd = file.fd;
let count = 0;
class Options {
offset: number = 0;
length: number = 0
}
console.log("33333333333333333333")
while (this.numBuffersToCapture) {
console.log("44444444444444444444")
let bufferSize = await (this.audioCapturer as audio.AudioCapturer).getBufferSize();
let buffer = await (this.audioCapturer as audio.AudioCapturer).read(bufferSize, true);
let options: Options = {
offset: count * bufferSize,
length: bufferSize
};
if (buffer === undefined) {
console.error(`${this.TAG}: read buffer failed`);
} else {
let number = fs.writeSync(fd, buffer, options);
console.info(`${this.TAG}: write data: ${number}`);
}
console.log("-------------------" + this.numBuffersToCapture)
count++;
}
}
}
// Stop capturing
async stop() {
if (this.audioCapturer !== undefined) {
// stopping is only allowed when the capturer is in the STATE_RUNNING or STATE_PAUSED state
if ((this.audioCapturer as audio.AudioCapturer).state.valueOf() !== audio.AudioState.STATE_RUNNING && (this.audioCapturer as audio.AudioCapturer).state.valueOf() !== audio.AudioState.STATE_PAUSED) {
console.info('Capturer is not running or paused');
return;
}
await (this.audioCapturer as audio.AudioCapturer).stop(); // stop capturing
if ((this.audioCapturer as audio.AudioCapturer).state.valueOf() === audio.AudioState.STATE_STOPPED) {
console.info('Capturer stopped');
} else {
console.error('Capturer stop failed');
}
}
}
// Destroy the instance and release resources
async release() {
if (this.audioCapturer !== undefined) {
// release() is only allowed when the capturer is in neither the STATE_RELEASED nor the STATE_NEW state
if ((this.audioCapturer as audio.AudioCapturer).state.valueOf() === audio.AudioState.STATE_RELEASED || (this.audioCapturer as audio.AudioCapturer).state.valueOf() === audio.AudioState.STATE_NEW) {
console.info('Capturer already released');
return;
}
await (this.audioCapturer as audio.AudioCapturer).release(); // release resources
if ((this.audioCapturer as audio.AudioCapturer).state.valueOf() === audio.AudioState.STATE_RELEASED) {
console.info('Capturer released');
} else {
console.error('Capturer release failed');
}
}
}
// -----------------------------------------------------------------------------------------
async onPageShow() {
// Create and activate a media session of type 'voice_call'; this lets the AVCastPicker below offer output-device switching (earpiece/speaker)
let context = getContext(this);
let type: AVSessionManager.AVSessionType = 'voice_call';
let session = await AVSessionManager.createAVSession(context, 'SESSION_NAME', type);
console.info('AVSession created: ' + JSON.stringify(session));
// activate() should only be called after metadata and control commands have been registered
await session.activate();
console.info(`session create done : sessionId : ${session.sessionId}`);
}
build() {
Row() {
Column() {
Button() {
Text("初始化")
.fontSize(20)
}.height(100)
.width(100)
.onClick(() => {
this.init()
promptAction.showToast({
message: "初始化完成",
duration: 2000
})
})
Button() {
Text("开始录制")
.fontSize(20)
}
.height(100)
.width(100)
.onClick(() => {
this.start()
promptAction.showToast({
message: "开始录制音频",
duration: 2000
})
})
Button() {
Text("停止录制")
.fontSize(20)
}
.height(100)
.width(100)
.onClick(() => {
this.stop()
promptAction.showToast({
message: "停止录制音频",
duration: 2000
})
})
Button() {
Text("释放资源")
.fontSize(20)
}
.height(100)
.width(100)
.onClick(() => {
this.release()
promptAction.showToast({
message: "释放资源完成",
duration: 2000
})
})
Button() {
Text("播放音频")
.fontSize(20)
}
.height(100)
.width(100)
.backgroundColor(Color.Yellow)
.onClick(() => {
this.audioRenderer.start("/test123.pcm",this.context)
})
AVCastPicker()
.width('40vp')
.height('40vp')
.border({ width: 1, color: Color.Red })
}
}
}
Is 小蛋糕 official technical support? Have you considered that most of the audio people play is not PCM-encoded? AudioRenderer can only play PCM, and the decoding capability is C++-only with no ArkTS API provided. For a simple earpiece/speaker switch we end up having to dig into codecs.
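For compressed formats (mp3, m4a, and so on) you do not have to decode to PCM yourself: AVPlayer handles decoding, and in newer API versions it exposes an audioRendererInfo property, so the same STREAM_USAGE_VOICE_COMMUNICATION approach can route decoded audio to the earpiece. A minimal sketch follows, assuming AVPlayer.audioRendererInfo is available at your API level (it can only be set in the initialized state, before prepare()); playEncodedThroughEarpiece is a hypothetical helper name:
import media from '@ohos.multimedia.media';
import audio from '@ohos.multimedia.audio';
// Minimal sketch: play an encoded audio file (e.g. mp3) through the earpiece.
// Assumption: AVPlayer.audioRendererInfo is supported at the target API level.
async function playEncodedThroughEarpiece(url: string): Promise<void> {
  const avPlayer = await media.createAVPlayer();
  avPlayer.on('stateChange', async (state: string) => {
    if (state === 'initialized') {
      // Must be set before prepare(); voice-communication usage routes output to the earpiece.
      avPlayer.audioRendererInfo = {
        usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION,
        rendererFlags: 0
      };
      await avPlayer.prepare();
    } else if (state === 'prepared') {
      await avPlayer.play();
    }
  });
  avPlayer.url = url; // e.g. 'fd://...' for a file in the app sandbox
}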
Regarding the question in the post title, "HarmonyOS 鸿蒙Next: cannot find an API in the official documentation for playing audio through the earpiece", here is a summary:
In HarmonyOS Next, the reason you cannot find a dedicated "play through earpiece" setter in the public API is that audio routing is driven by the stream's declared usage rather than by an explicit output-device switch, and the API surface has also shifted between releases. As Huawei's independently developed operating system, HarmonyOS designs its APIs differently from Android and other traditional systems.
Setting the playback path (earpiece versus speaker) involves the underlying audio management module. In HarmonyOS this is expressed through StreamUsage on the renderer (or player) together with the audio routing manager; since these interfaces have been adjusted across versions, consult the latest HarmonyOS developer documentation and SDK for authoritative details.
Finally, although HarmonyOS and Android share some low-level concepts, directly transplanting Android's audio-management approach (for example, AudioManager.setSpeakerphoneOn) generally does not work. Developers should rely on the official HarmonyOS documentation and sample code to ensure compatibility and stability.