Flutter语音处理插件voice的使用
Flutter语音处理插件Voice的使用
简介
voice
是一个用于语音处理的 Flutter 插件。它允许开发者在 Flutter 应用中集成语音相关的功能。本文档将展示如何使用 voice
插件,并提供一个完整的示例代码。
使用步骤
-
初始化项目 首先,确保你的 Flutter 环境已正确配置。然后创建一个新的 Flutter 项目:
flutter create voice_example cd voice_example
-
添加依赖 在项目的
pubspec.yaml
文件中添加voice
插件依赖:dependencies: voice: ^版本号
执行以下命令以更新依赖:
flutter pub get
-
权限请求 在 Android 和 iOS 平台上,可能需要请求某些权限(如存储权限)。使用
permission_handler
插件来管理权限请求。import 'package:permission_handler/permission_handler.dart'; Future<void> main() async { WidgetsFlutterBinding.ensureInitialized(); Map<Permission, PermissionStatus> statuses = await [ Permission.ignoreBatteryOptimizations, Permission.storage, ].request(); }
-
初始化插件 在应用启动时,初始化
voice
插件并设置必要的参数。import 'package:voice/voice.dart'; class MyApp extends StatefulWidget { @override State<MyApp> createState() => _MyAppState(); } class _MyAppState extends State<MyApp> { final voice = Voice(); @override void initState() { super.initState(); // 初始化插件 } }
-
运行语音处理任务 使用
voice.run()
方法启动语音处理任务,并监听其状态变化。FloatingActionButton.extended( onPressed: () async { pid = await voice.run(Proxy( port, host, name, )); setState(() { loading = true; }); BotToast.showText(text: "启动中..."); Timer.periodic(const Duration(seconds: 2), (timer) async { int code = await checkProxy(""); tryCount += 1; if (code == 200) { SpUtil.putString("name", name ?? ""); SpUtil.putString("port", port ?? ""); timer.cancel(); setState(() { status = true; loading = false; BotToast.showText(text: "启动成功"); }); } else if (tryCount > 4) { timer.cancel(); setState(() { tryCount = 0; loading = false; BotToast.showText(text: "启动失败"); }); } }); }, backgroundColor: Colors.green, foregroundColor: Colors.white, icon: const Icon(Icons.not_started_rounded), label: const Text("启动"), )
-
停止任务 使用
voice.stop()
方法停止语音处理任务。FloatingActionButton.extended( onPressed: () async { await voice.stop(pid ?? ""); setState(() { status = false; }); }, backgroundColor: Colors.redAccent, foregroundColor: Colors.white, icon: const Icon(Icons.stop_circle_rounded), label: const Text("停止"), )
完整示例代码
以下是一个完整的示例代码,展示了如何使用 voice
插件进行语音处理。
import 'dart:async';
import 'dart:io';
import 'dart:math';
import 'package:bot_toast/bot_toast.dart';
import 'package:flutter/material.dart';
import 'package:flutter/services.dart';
import 'package:permission_handler/permission_handler.dart';
import 'package:sp_util/sp_util.dart';
import 'package:uuid/uuid.dart';
import 'package:voice/proxy.dart';
import 'package:voice/voice.dart';
/// App entry point.
///
/// Requests runtime permissions, loads persisted settings, and configures a
/// transparent status bar on Android before launching the UI.
Future<void> main() async {
  WidgetsFlutterBinding.ensureInitialized();
  // The result map is intentionally ignored: the app degrades gracefully if a
  // permission is denied, so there is no need to keep an unused local.
  await [
    Permission.ignoreBatteryOptimizations,
    Permission.storage,
  ].request();
  await SpUtil.getInstance();
  if (Platform.isAndroid) {
    // Transparent status bar with light icons (Android only).
    const style = SystemUiOverlayStyle(
        statusBarColor: Colors.transparent,
        statusBarIconBrightness: Brightness.light);
    SystemChrome.setSystemUIOverlayStyle(style);
  }
  runApp(const MyApp());
}
/// Root widget of the proxy-control example app.
class MyApp extends StatefulWidget {
  const MyApp({super.key});

  // The original text carried a markdown-garbled annotation
  // ("[@override](/user/override)"); restored to plain @override.
  @override
  State<MyApp> createState() => _MyAppState();
}
/// State for [MyApp]: drives the proxy lifecycle (start / verify / stop)
/// and renders the control buttons.
class _MyAppState extends State<MyApp> {
  final voice = Voice();

  /// Process id returned by [Voice.run]; needed to stop the task later.
  String? pid;
  final _random = Random();

  /// Proxy port, persisted across launches (random 4-digit default).
  String? port;

  /// Proxy user name, persisted across launches (UUID default).
  String? name;
  String host = '121.37.139.13';

  /// Whether the proxy is up and verified.
  bool status = false;

  /// Whether a start attempt is currently in progress.
  bool loading = false;

  /// Number of health checks performed in the current start attempt.
  int tryCount = 0;

  @override
  void initState() {
    super.initState();
    // Restore persisted settings, generating defaults on first run.
    port = SpUtil.getString("port", defValue: next(1000, 9999).toString());
    name = SpUtil.getString("name",
        defValue: const Uuid().v1().replaceAll("-", ""));
  }

  /// Returns a random integer in the half-open range [min, max).
  int next(int min, int max) => min + _random.nextInt(max - min);

  /// Probes the proxy by fetching a well-known URL through it.
  ///
  /// Returns the HTTP status code (200 means the proxy is reachable), or -1
  /// when the connection fails (e.g. the proxy is not listening yet).
  ///
  /// [s] is unused; it is kept for backward compatibility with callers.
  Future<int> checkProxy(String s) async {
    HttpClient httpClient = HttpClient();
    // BUG FIX: the proxy must be configured BEFORE the request is opened.
    // The original assigned findProxy after getUrl(), so the probe connected
    // directly and never actually exercised the proxy.
    var proxy = "PROXY $host:$port";
    httpClient.findProxy = (uri) => proxy;
    try {
      HttpClientRequest request =
          await httpClient.getUrl(Uri.parse("https://www.baidu.com"));
      request.headers.add(
        "user-agent",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1",
      );
      HttpClientResponse response = await request.close();
      return response.statusCode;
    } on IOException {
      // Treat network errors as a failed check instead of letting the
      // exception escape the Timer.periodic callback and break the retries.
      return -1;
    } finally {
      // Always release the client, even when the request throws.
      httpClient.close();
    }
  }

  /// Starts the proxy task, then polls it every 2 seconds until it answers
  /// with HTTP 200 (success) or more than 4 checks have run (failure).
  Future<void> _start() async {
    pid = await voice.run(Proxy(
      port,
      host,
      name,
    ));
    setState(() {
      loading = true;
    });
    BotToast.showText(text: "启动中...");
    Timer.periodic(const Duration(seconds: 2), (timer) async {
      int code = await checkProxy("");
      tryCount += 1;
      if (code == 200) {
        // Persist the working configuration for the next launch.
        SpUtil.putString("name", name ?? "");
        SpUtil.putString("port", port ?? "");
        timer.cancel();
        // Toasts are side effects and do not belong inside setState.
        BotToast.showText(text: "启动成功");
        setState(() {
          status = true;
          loading = false;
        });
      } else if (tryCount > 4) {
        timer.cancel();
        BotToast.showText(text: "启动失败");
        setState(() {
          tryCount = 0;
          loading = false;
        });
      }
    });
  }

  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      builder: BotToastInit(),
      home: Scaffold(
        appBar: AppBar(
          title: const Text('冬瓜嗅探'),
        ),
        body: Center(
          child: Column(
            children: [
              const SizedBox(
                height: 150,
              ),
              const Text(
                '重要提示:一定要设置不对后台行为做限制',
                style: TextStyle(fontSize: 14, fontWeight: FontWeight.bold),
              ),
              const SizedBox(
                height: 20,
              ),
              status
                  ? FloatingActionButton.extended(
                      onPressed: () async {
                        // Copy "host:port" so the user can paste it into a
                        // client. Awaited so failures are not silently dropped.
                        await Clipboard.setData(
                            ClipboardData(text: "$host:${port!}"));
                      },
                      backgroundColor: Colors.blue,
                      foregroundColor: Colors.white,
                      icon: const Icon(Icons.copy),
                      label: const Text("点击复制代理地址"),
                    )
                  : FloatingActionButton.extended(
                      onPressed: _start,
                      backgroundColor: Colors.green,
                      foregroundColor: Colors.white,
                      icon: const Icon(Icons.not_started_rounded),
                      label: const Text("启动"),
                    ),
              FloatingActionButton.extended(
                onPressed: () async {
                  await voice.stop(pid ?? "");
                  setState(() {
                    status = false;
                  });
                },
                backgroundColor: Colors.redAccent,
                foregroundColor: Colors.white,
                icon: const Icon(Icons.stop_circle_rounded),
                label: const Text("停止"),
              ),
            ],
          ),
        ),
      ),
    );
  }
}
更多关于Flutter语音处理插件voice的使用的实战系列教程也可以访问 https://www.itying.com/category-92-b0.html
在Flutter中,voice
插件是一个用于处理语音的插件,它可以帮助你录制、播放和处理音频文件。以下是如何使用 voice
插件的基本步骤:
1. 添加依赖
首先,你需要在 pubspec.yaml
文件中添加 voice
插件的依赖:
dependencies:
flutter:
sdk: flutter
voice: ^0.1.0 # 请检查最新版本
然后运行 flutter pub get
来安装依赖。
2. 导入插件
在你的 Dart 文件中导入 voice
插件:
import 'package:voice/voice.dart';
3. 初始化 Voice
在使用 voice
插件之前,你需要初始化它:
Voice voice = Voice();
4. 录制音频
你可以使用 startRecording
方法来开始录制音频:
void startRecording() async {
await voice.startRecording();
print("Recording started");
}
5. 停止录制并保存音频
使用 stopRecording
方法来停止录制并保存音频文件:
void stopRecording() async {
String? filePath = await voice.stopRecording();
if (filePath != null) {
print("Recording stopped and saved at: $filePath");
} else {
print("Recording failed to save");
}
}
6. 播放音频
你可以使用 play
方法来播放录制的音频:
void playRecording(String filePath) async {
await voice.play(filePath);
print("Playing audio from: $filePath");
}
7. 停止播放
使用 stopPlaying
方法来停止播放音频:
void stopPlaying() async {
await voice.stopPlaying();
print("Playback stopped");
}
8. 处理权限
在 Android 和 iOS 上,录制音频需要获取麦克风权限。你需要在 AndroidManifest.xml
和 Info.plist
中添加相应的权限声明。
AndroidManifest.xml:
<uses-permission android:name="android.permission.RECORD_AUDIO" />
Info.plist:
<key>NSMicrophoneUsageDescription</key>
<string>We need access to your microphone to record audio.</string>
9. 请求权限
在 Flutter 中,你可以使用 permission_handler
插件来请求麦克风权限:
import 'package:permission_handler/permission_handler.dart';
void requestMicrophonePermission() async {
var status = await Permission.microphone.request();
if (status.isGranted) {
print("Microphone permission granted");
} else {
print("Microphone permission denied");
}
}
10. 完整示例
以下是一个完整的示例,展示了如何使用 voice
插件录制、播放音频:
import 'package:flutter/material.dart';
import 'package:voice/voice.dart';
import 'package:permission_handler/permission_handler.dart';
/// Entry point: launches the voice recording demo.
void main() => runApp(MyApp());
/// Root widget: hosts the [VoiceExample] screen.
class MyApp extends StatelessWidget {
  const MyApp({super.key});

  // Restored from the markdown-garbled "[@override](/user/override)" token.
  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      home: VoiceExample(),
    );
  }
}
/// Screen demonstrating record / stop / play controls backed by the
/// voice plugin.
class VoiceExample extends StatefulWidget {
  const VoiceExample({super.key});

  // Restored from the markdown-garbled "[@override](/user/override)" token.
  @override
  _VoiceExampleState createState() => _VoiceExampleState();
}
/// State for [VoiceExample]: wires the buttons to the voice plugin's
/// record / play API and tracks the last saved recording.
class _VoiceExampleState extends State<VoiceExample> {
  Voice voice = Voice();

  /// Path of the last saved recording; null until a recording succeeds.
  String? filePath;

  /// Starts capturing audio from the microphone.
  ///
  /// Returns a Future (instead of fire-and-forget void) so callers may await
  /// completion; still assignable to VoidCallback for onPressed.
  Future<void> startRecording() async {
    await voice.startRecording();
    debugPrint("Recording started");
  }

  /// Stops recording and remembers where the audio file was saved.
  Future<void> stopRecording() async {
    filePath = await voice.stopRecording();
    if (filePath != null) {
      debugPrint("Recording stopped and saved at: $filePath");
    } else {
      debugPrint("Recording failed to save");
    }
  }

  /// Plays back the last recording, if one exists.
  Future<void> playRecording() async {
    // Copy to a local: mutable fields do not promote on null checks, so the
    // original needed an unsafe `filePath!` here.
    final path = filePath;
    if (path != null) {
      await voice.play(path);
      debugPrint("Playing audio from: $path");
    } else {
      debugPrint("No audio file to play");
    }
  }

  /// Stops any in-progress playback.
  Future<void> stopPlaying() async {
    await voice.stopPlaying();
    debugPrint("Playback stopped");
  }

  /// Requests microphone access (required for recording on Android/iOS).
  Future<void> requestMicrophonePermission() async {
    var status = await Permission.microphone.request();
    if (status.isGranted) {
      debugPrint("Microphone permission granted");
    } else {
      debugPrint("Microphone permission denied");
    }
  }

  // Restored from the markdown-garbled "[@override](/user/override)" token.
  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: const Text("Voice Example"),
      ),
      body: Center(
        child: Column(
          mainAxisAlignment: MainAxisAlignment.center,
          children: <Widget>[
            ElevatedButton(
              onPressed: requestMicrophonePermission,
              child: const Text("Request Microphone Permission"),
            ),
            ElevatedButton(
              onPressed: startRecording,
              child: const Text("Start Recording"),
            ),
            ElevatedButton(
              onPressed: stopRecording,
              child: const Text("Stop Recording"),
            ),
            ElevatedButton(
              onPressed: playRecording,
              child: const Text("Play Recording"),
            ),
            ElevatedButton(
              onPressed: stopPlaying,
              child: const Text("Stop Playing"),
            ),
          ],
        ),
      ),
    );
  }
}