Using the Flutter AI engine plugin chivox_aiengine
How to add the package
Add the following to your pubspec.yaml file, then run flutter pub get:
dependencies:
chivox_aiengine: ^0.0.1
Reference documentation
See the Chivox AI Engine documentation.
Complete example
Below is a complete example using the chivox_aiengine plugin. Make sure you have added the required dependencies and obtained your appKey, secretKey, and provisionB64.
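The keys below are hard-coded for brevity. Beyond a quick demo, you might prefer to inject them at build time instead of committing them to source; a minimal sketch using Dart's built-in String.fromEnvironment (the CHIVOX_* variable names are our own and would be supplied via flutter run --dart-define=CHIVOX_APP_KEY=... and so on):

const appKey = String.fromEnvironment('CHIVOX_APP_KEY', defaultValue: 'your appKey');
const secretKey = String.fromEnvironment('CHIVOX_SECRET_KEY', defaultValue: 'your secretKey');
const provisionB64 = String.fromEnvironment('CHIVOX_PROVISION_B64', defaultValue: 'your provision');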
import 'dart:convert';
import 'package:flutter/material.dart';
import 'package:flutter/services.dart';
import 'package:flutter_sound/flutter_sound.dart';
import 'package:chivox_aiengine/chivox_aiengine.dart';
import 'package:permission_handler/permission_handler.dart';
const appKey = "your appKey";
const secretKey = "your secretKey";
const provisionB64 = "your provision";
void main() {
runApp(const MyApp());
}
class MyApp extends StatefulWidget {
const MyApp({super.key});
@override
State<MyApp> createState() => _MyAppState();
}
class _MyAppState extends State<MyApp> {
String _platformVersion = 'Unknown';
ChivoxAiengine? _engine;
String textValue = '';
String displayText = '';
dynamic refText; // a String for most cores; a Map for en.choc.score
String coreType = '';
final FlutterSoundPlayer _player = FlutterSoundPlayer();
final FlutterSoundRecorder _recorder = FlutterSoundRecorder();
@override
void initState() {
super.initState();
_player.openAudioSession().then((value) {
print('Player initialization successful');
});
_recorder.openAudioSession().then((value) {
print('Recorder initialized successfully');
});
initPlatformState();
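// Demo defaults: evaluate the single word "apple" with the
// en.word.score core; _setCoreType switches between the other cores.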
displayText = "apple";
refText = "apple";
coreType = "en.word.score";
aiengineNew();
}
@override
void dispose() {
_player.closeAudioSession();
_recorder.closeAudioSession();
super.dispose();
}
Future<void> _startRecording() async {
PermissionStatus status = await Permission.microphone.request();
print("=======start recording=====");
if (status.isGranted) {
// Recording permission granted.
aiengineStart();
} else if (status.isDenied) {
// Recording permission denied.
print("audio not granted");
} else if (status.isPermanentlyDenied) {
// Permanently denied: direct the user to the system settings page
// to grant the permission manually.
print("audio not granted forever");
}
}
Future<void> _stopRecording() async {
aiengineStop();
}
Future<void> _setCoreType(String coreTypeTemp) async {
print('coreType: $coreTypeTemp');
coreType = coreTypeTemp;
if ("en.word.score" == coreType) {
displayText = "apple";
refText = "apple";
} else if ("en.sent.score" == coreType) {
displayText = "Thanks for coming to see me";
refText = "Thanks for coming to see me";
} else if ("en.pred.score" == coreType) {
displayText = "Happiness is about having each tiny wish come true, or having something to eat when you are hungry or having someone's love when you need love.";
refText = "Happiness is about having each tiny wish come true, or having something to eat when you are hungry or having someone's love when you need love.";
} else if ("en.choc.score" == coreType) {
displayText = "Tell me an animal that can fly.\n A. Tiger \n B. Panda\n C. Bird \n";
var lm = {
"lm": [
{"answer": 0, "text": "Tiger"},
{"answer": 0, "text": "Panda"},
{"answer": 1, "text": "Bird"},
]
};
refText = lm;
}
print('after set refText');
textValue = ' ';
setState(() {});
print("updata UI");
}
Future<void> _cancelRecording() async {
aiengineCancel();
}
Future<void> _stopPlaying() async {
await _player.stopPlayer();
}
// Platform messages are asynchronous, so we initialize in an async method.
Future<void> initPlatformState() async {
String platformVersion;
try {
platformVersion = await ChivoxAiengine.getPlatformVersion() ?? 'Unknown platform version';
} on PlatformException {
platformVersion = 'Failed to get platform version.';
}
if (!mounted) return;
setState(() {
_platformVersion = platformVersion;
});
}
@override
Widget build(BuildContext context) {
return MaterialApp(
home: Scaffold(
appBar: AppBar(
title: const Text('Chivox Online Evaluation'),
),
body: Center(
child: Column(
mainAxisAlignment: MainAxisAlignment.center,
children: [
Text(
displayText + '\n\n', // the text to read aloud
style: TextStyle(fontSize: 25, height: 1),
),
Row(
mainAxisAlignment: MainAxisAlignment.center,
children: [
ElevatedButton(
child: Text(' Word '),
onPressed: () {
_setCoreType("en.word.score");
},
),
SizedBox(width: 16.0), // spacing between the two buttons
ElevatedButton(
child: Text(' Sentence '),
onPressed: () {
_setCoreType("en.sent.score");
},
),
],
),
SizedBox(height: 16.0), // spacing between the two rows of buttons
Row(
mainAxisAlignment: MainAxisAlignment.center,
children: [
ElevatedButton(
child: Text('Paragraph'),
onPressed: () {
_setCoreType("en.pred.score");
},
),
SizedBox(width: 16.0),
ElevatedButton(
child: Text(' Multiple Choice '),
onPressed: () {
_setCoreType("en.choc.score");
},
),
],
),
Text(
"\n评估结果:\n",
style: TextStyle(fontSize: 20, height: 1),
),
Text(
textValue,
style: TextStyle(fontSize: 25),
),
SizedBox(height: 16.0),
Row(
mainAxisAlignment: MainAxisAlignment.center,
children: [
ElevatedButton(
child: Text('Start Recording'),
onPressed: _startRecording,
),
SizedBox(width: 16.0),
ElevatedButton(
child: Text('Stop Recording'),
onPressed: _stopRecording,
),
],
),
],
),
),
),
);
}
Future<void> aiengineNew() async {
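// Engine configuration: account credentials plus the cloud endpoint
// used for online scoring.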
Map<String, dynamic> cfg = {
"appKey": appKey,
"secretKey": secretKey,
"provision": provisionB64,
"cloud": {"server": "wss://cloud.chivox.com:443"}
};
_engine = await ChivoxAiengine.create(json.encode(cfg));
print("after new");
print(_engine);
setState(() {});
}
Future<void> aiengineDelete() async {
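// Release the engine here if the plugin exposes a corresponding
// destroy/delete API (not shown in this demo; check the plugin docs).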
setState(() {});
}
Future<void> aiengineStart() async {
textValue = '';
setState(() {});
var audioSrc = {
"srcType": "innerRecorder", // innerRecorder - 内置录音机
"innerRecorderParam": {
"channel": 1,
"sampleBytes": 2,
"sampleRate": 16000,
},
};
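// Evaluation request parameters. "rank": 100 asks for scores on a
// 0-100 scale, and soundIntensityEnable / vad.vadEnable switch on the
// onSoundIntensity and onVad callbacks below (our reading of the
// fields; see the Chivox docs for the authoritative reference).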
var param = {
"soundIntensityEnable": 1,
"coreProvideType": "cloud",
"app": {
"userId": "flutter-test-user"
},
"vad": {
"vadEnable": 1,
"refDuration": 2,
"speechLowSeek": 20
},
"audio": {
"audioType": "wav",
"sampleRate": 16000,
"sampleBytes": 2,
"channel": 1
},
"request": {
"rank": 100,
"refText": refText,
"coreType": coreType,
"attachAudioUrl": 1
}
};
print(param);
print(_engine);
await _engine!.start(
audioSrc,
json.encode(param),
ChivoxAiengineResultListener(
onEvalResult: (ChivoxAiengineResult result) {
print("==========onEvalResult================");
var jsonString = result.text;
Map<String, dynamic> jsonData = jsonDecode(jsonString.toString());
print('callback coreType: $coreType');
int overall = jsonData['result']['overall'];
if ("en.word.score" == coreType || "en.choc.score" == coreType) {
// Word and multiple-choice cores report a single overall score.
textValue = 'Overall Score : $overall\n';
} else if ("en.sent.score" == coreType || "en.pred.score" == coreType) {
// Sentence and paragraph cores also report fluency, integrity
// and accuracy sub-scores.
int fluency = jsonData['result']['fluency']['overall'];
int integrity = jsonData['result']['integrity'];
int accuracy = jsonData['result']['accuracy'];
textValue = 'Overall Score : $overall\n'
'Fluency Score : $fluency\n'
'Integrity Score : $integrity\n'
'Accuracy Score : $accuracy';
}
print('Overall: $overall');
setState(() {});
},
onBinaryResult: (result) {
print("==========onBinaryResult================");
},
onError: (result) {
print("==========onError================");
print(result.text);
},
onVad: (result) {
print("==========onVad================");
textValue = result.text.toString();
setState(() {});
},
onSoundIntensity: (result) {
print("==========onSoundIntensity================");
textValue = result.text.toString();
setState(() {});
},
onOther: (result) {
print("==========onOther================");
}));
// Refresh the UI once recording has started.
setState(() {});
}
Future<void> aiengineStop() async {
print("======stop 1=====");
await _engine!.stop();
print("======stop 2=====");
setState(() {});
}
Future<void> aiengineCancel() async {
print("======cancel 1=====");
await _engine!.cancel();
print("======cancel 2=====");
setState(() {});
}
}
For more hands-on tutorials on the Flutter AI engine plugin chivox_aiengine, visit https://www.itying.com/category-92-b0.html
Here is another example of how the chivox_aiengine plugin might be used in a Flutter project, this time sketching speech recognition and text-to-speech synthesis. Note that the API shown below (ChivoxAIEngine.init, startRecognition, startSynthesis) is illustrative and may not match the plugin's actual interface; compare the first example above, which uses ChivoxAiengine.create/start/stop. First, make sure you have added the plugin dependency in your pubspec.yaml file:
dependencies:
flutter:
sdk: flutter
chivox_aiengine: ^x.y.z # replace with the latest published version
Then run flutter pub get to install the dependency.
Example code
Below is a simple Flutter app demonstrating how speech recognition and text-to-speech synthesis might be wired up with chivox_aiengine.
main.dart
import 'package:flutter/material.dart';
import 'package:chivox_aiengine/chivox_aiengine.dart';
void main() {
runApp(MyApp());
}
class MyApp extends StatefulWidget {
@override
_MyAppState createState() => _MyAppState();
}
class _MyAppState extends State<MyApp> {
String recognizedText = "";
ChivoxAIEngine? _chivoxAIEngine;
@override
void initState() {
super.initState();
// Initialize the ChivoxAIEngine
_initChivoxAIEngine();
}
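// NOTE: the init/startRecognition/startSynthesis calls below are
// illustrative method names; verify them against the plugin's actual
// API (the first example uses ChivoxAiengine.create/start/stop).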
Future<void> _initChivoxAIEngine() async {
_chivoxAIEngine = await ChivoxAIEngine.init(
appId: 'your APP_ID', // replace with your actual APP_ID
apiKey: 'your API_KEY', // replace with your actual API_KEY
);
}
Future<void> startRecognition() async {
if (_chivoxAIEngine != null) {
try {
String result = await _chivoxAIEngine!.startRecognition();
setState(() {
recognizedText = result;
});
} catch (e) {
print("语音识别错误: $e");
}
}
}
Future<void> startSynthesis(String text) async {
if (_chivoxAIEngine != null) {
try {
await _chivoxAIEngine!.startSynthesis(text);
} catch (e) {
print("语音合成错误: $e");
}
}
}
@override
Widget build(BuildContext context) {
return MaterialApp(
home: Scaffold(
appBar: AppBar(
title: Text('Chivox AI Engine Demo'),
),
body: Padding(
padding: const EdgeInsets.all(16.0),
child: Column(
mainAxisAlignment: MainAxisAlignment.center,
children: <Widget>[
Text(
'Recognition result:',
style: TextStyle(fontSize: 18),
),
Text(
recognizedText,
style: TextStyle(fontSize: 20),
),
SizedBox(height: 20),
ElevatedButton(
onPressed: startRecognition,
child: Text('Start speech recognition'),
),
SizedBox(height: 20),
TextField(
decoration: InputDecoration(
labelText: 'Enter text to synthesize',
),
onSubmitted: (text) {
startSynthesis(text);
},
),
SizedBox(height: 20),
ElevatedButton(
onPressed: () {
// Reuse the last recognition result as the synthesis input.
startSynthesis(recognizedText);
},
child: Text('Synthesize recognition result'),
),
],
),
),
),
);
}
}
Notes
- APP_ID and API_KEY: when initializing ChivoxAIEngine you must supply a valid APP_ID and API_KEY; these credentials are issued by the AI service provider.
- Permissions: make sure your Android and iOS projects are configured with the required permissions, such as microphone access (see the sketch after this list).
- Error handling: in a real application it is important to add more thorough error handling and user feedback.
- Platform-specific configuration: depending on the platform (Android or iOS), additional configuration may be required; refer to the chivox_aiengine plugin's official documentation for details.
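As a concrete reference for the permissions note above, these are the standard platform declarations for microphone access (standard Android/iOS keys, not specific to this plugin):

In android/app/src/main/AndroidManifest.xml:
<uses-permission android:name="android.permission.RECORD_AUDIO" />

In ios/Runner/Info.plist:
<key>NSMicrophoneUsageDescription</key>
<string>This app uses the microphone for speech evaluation.</string>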
This example provides a basic skeleton showing how to use the chivox_aiengine plugin for speech recognition and speech synthesis in a Flutter app. You can extend and customize it to fit your needs.