Using the Flutter ChatGPT Plugin chat_gpt_sdk_lululala

ChatGPT and Flutter

ChatGPT is a chatbot released by OpenAI in November 2022. It is built on OpenAI's GPT-3.5 family of large language models and fine-tuned with both supervised learning and reinforcement learning.

Unofficial plugin

A community-maintained library.

A powerful OpenAI client library with GPT-4 support.

Features

  • Install the package
  • Create an OpenAI instance
  • Change the access token
  • Text completion
  • Chat completion (GPT-4 and GPT-3.5)
  • Assistants API
  • Threads
  • Messages
  • Runs
  • Error handling
  • Example Q&A
  • Generate images from a prompt
  • Edits
  • Cancel generation
  • Files
  • Audio
  • Embeddings
  • Fine-tuning
  • Moderation
  • Models and engines
  • Translation example
  • Video tutorial
  • Documentation available in Thai

Install the package

chat_gpt_sdk_lululala: 3.1.2
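
The entry goes under dependencies in pubspec.yaml, followed by flutter pub get; a typical entry using the version above (check pub.dev for the latest release):

dependencies:
  flutter:
    sdk: flutter
  chat_gpt_sdk_lululala: 3.1.2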

Create an OpenAI instance

final openAI = OpenAI.instance.build(
  token: token,
  baseOption: HttpSetup(receiveTimeout: const Duration(seconds: 5)),
  enableLog: true,
);

Change the access token

openAI.setToken('new-access-token');
/// read the current token
openAI.token;
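
Because the token grants access to your OpenAI account, avoid hardcoding it in source. A minimal sketch, assuming the key is injected at build time with --dart-define (the variable name OPENAI_API_KEY is an arbitrary choice for this example):

// Run with: flutter run --dart-define=OPENAI_API_KEY=sk-...
const token = String.fromEnvironment('OPENAI_API_KEY');

final openAI = OpenAI.instance.build(
  token: token,
  baseOption: HttpSetup(receiveTimeout: const Duration(seconds: 5)),
  enableLog: true,
);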

Text completion

Text completion example
void _translateEngToThai() async {
  final request = CompleteText(
    prompt: translateEngToThai(word: _txtWord.text.toString()),
    maxToken: 200,
    model: TextDavinci3Model(),
  );

  final response = await openAI.onCompletion(request: request);
  
  ///cancel the request
  openAI.cancelAIGenerate();
  print(response);
}
Using FutureBuilder
Future<CTResponse?>? _translateFuture;

_translateFuture = openAI.onCompletion(request: request);

///UI code
FutureBuilder<CTResponse?>(
  future: _translateFuture,
  builder: (context, snapshot) {
    if (snapshot.connectionState == ConnectionState.waiting) {
      return const CircularProgressIndicator();
    }
    // Show the completion text once the request has finished.
    final data = snapshot.data;
    return Text(data?.choices.last.text ?? '');
  },
)
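
To trigger the request from the UI, assign the future inside setState so the FutureBuilder above rebuilds; a small sketch reusing _txtWord and translateEngToThai from the completion example:

void _onTranslatePressed() {
  final request = CompleteText(
    prompt: translateEngToThai(word: _txtWord.text.toString()),
    maxToken: 200,
    model: TextDavinci3Model(),
  );
  setState(() {
    // The FutureBuilder shows the progress indicator until this future completes.
    _translateFuture = openAI.onCompletion(request: request);
  });
}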

Chat completion (GPT-4 and GPT-3.5)

Chat completion
void chatComplete() async {
  final request = ChatCompleteText(
    messages: [
      Map.of({"role": "user", "content": 'Hello!'})
    ],
    maxToken: 200,
    model: Gpt4ChatModel(),
  );

  final response = await openAI.onChatCompletion(request: request);
  for (var element in response!.choices) {
    print("data -> ${element.message?.content}");
  }
}
Using SSE (server-sent events)
void chatCompleteWithSSE() {
  final request = ChatCompleteText(
    messages: [
      Map.of({"role": "user", "content": 'Hello!'})
    ],
    maxToken: 200,
    model: Gpt4ChatModel(),
  );

  openAI.onChatCompletionSSE(request: request).listen((it) {
    debugPrint(it.choices.last.message?.content);
  });
}
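
Since onChatCompletionSSE returns a stream of partial responses, the chunks can be accumulated and pushed into the widget tree with setState; a minimal sketch inside a State class (the _buffer field is an assumption of this example, not part of the SDK):

final _buffer = StringBuffer();

void chatCompleteStreamed(ChatCompleteText request) {
  openAI.onChatCompletionSSE(request: request).listen(
    (it) {
      // Append each streamed chunk and rebuild, e.g. with Text(_buffer.toString()).
      setState(() => _buffer.write(it.choices.last.message?.content ?? ''));
    },
    onError: (err) => debugPrint('stream error: $err'),
  );
}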
Function calling support
void gptFunctionCalling() async {
  final request = ChatCompleteText(
    messages: [
      Messages(
        role: Role.user,
        content: "What is the weather like in Boston?",
        name: "get_current_weather",
      ),
    ],
    maxToken: 200,
    model: Gpt41106PreviewChatModel(),
    tools: [
      {
        "type": "function",
        "function": {
          "name": "get_current_weather",
          "description": "Get the current weather in a given location",
          "parameters": {
            "type": "object",
            "properties": {
              "location": {
                "type": "string",
                "description": "The city and state, e.g. San Francisco, CA"
              },
              "unit": {
                "type": "string",
                "enum": ["celsius", "fahrenheit"]
              }
            },
            "required": ["location"]
          }
        }
      }
    ],
    toolChoice: 'auto',
  );

  ChatCTResponse? response = await openAI.onChatCompletion(request: request);
}
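
The tools block only declares the function schema; acting on the model's call is left to the app. A hypothetical local implementation of get_current_weather that such a call could be dispatched to (the return shape is an assumption, not part of the SDK):

// Hypothetical handler matching the schema declared in tools above.
Map<String, dynamic> getCurrentWeather(String location, {String unit = 'celsius'}) {
  // A real app would call a weather service here.
  return {
    'location': location,
    'unit': unit,
    'temperature': 22,
    'forecast': ['sunny', 'windy'],
  };
}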
Image input
void imageInput() async {
  final request = ChatCompleteText(
    messages: [
      {
        "role": "user",
        "content": [
          {"type": "text", "text": "What’s in this image?"},
          {
            "type": "image_url",
            "image_url": {"url": "image-url"}
          }
        ]
      }
    ],
    maxToken: 200,
    model: Gpt4VisionPreviewChatModel(),
  );

  ChatCTResponse? response = await openAI.onChatCompletion(request: request);
  debugPrint("$response");
}
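
For a local image, the OpenAI vision endpoint also accepts a base64 data URL in place of a plain URL, so the "url" value above can be built from a file; a sketch using dart:convert (assumes a JPEG file):

import 'dart:convert';
import 'dart:io';

/// Builds a data URL suitable for the image_url field above.
String imageFileToDataUrl(String path) {
  final bytes = File(path).readAsBytesSync();
  return 'data:image/jpeg;base64,${base64Encode(bytes)}';
}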

Assistants API

Create an assistant
void createAssistant() async {
  final assistant = Assistant(
    model: Gpt4AModel(),
    name: 'Math Tutor',
    instructions: 'You are a personal math tutor. When asked a question, write and run Python code to answer the question.',
    tools: [
      {
        "type": "code_interpreter",
      }
    ],
  );
  await openAI.assistant.create(assistant: assistant);
}
List assistants
void listAssistant() async {
  final assistants = await openAI.assistant.list();
  assistants.map((e) => e.toJson()).forEach(print);
}

Threads

Create a thread
void createThreads() async {
  final request = ThreadRequest(messages: [
    {
      "role": "user",
      "content": "Hello, what is AI?",
      "file_ids": ["file-abc123"]
    },
    {
      "role": "user",
      "content": "How does AI work? Explain it in simple terms."
    },
  ]);

  await openAI.threads.createThread(request: request);
}
Modify a thread
void modifyThread() async {
  await openAI.threads.modifyThread(
    threadId: 'threadId',
    metadata: {
      "metadata": {
        "modified": "true",
        "user": "abc123",
      },
    },
  );
}

Messages

Create a message
void createMessage() async {
  final request = CreateMessage(
    role: 'user',
    content: 'How does AI work? Explain it in simple terms.',
  );
  await openAI.threads.messages.createMessage(
    threadId: 'threadId',
    request: request,
  );
}

Runs

Create a run
void createRun() async {
  final request = CreateRun(assistantId: 'assistantId');
  await openAI.threads.runs.createRun(
    threadId: 'threadId',
    request: request,
  );
}

Error handling

///using catchError
openAI.onCompletion(request: request)
  .catchError((err) {
    if (err is OpenAIAuthError) {
      print('OpenAIAuthError error ${err.data?.error?.toMap()}');
    }
    if (err is OpenAIRateLimitError) {
      print('OpenAIRateLimitError error ${err.data?.error?.toMap()}');
    }
    if (err is OpenAIServerError) {
      print('OpenAIServerError error ${err.data?.error?.toMap()}');
    }
  });

///using try/catch
try {
  await openAI.onCompletion(request: request);
} on OpenAIRateLimitError catch (err) {
  print('catch error ->${err.data?.error?.toMap()}');
}

///using a stream
openAI
  .onCompletionSSE(request: request)
  .transform(StreamTransformer.fromHandlers(
    handleError: (error, stackTrace, sink) {
      if (error is OpenAIRateLimitError) {
        print('OpenAIRateLimitError error ->${error.data?.message}');
      }
  }))
  .listen((event) {
    print("success");
  });
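
Because rate limits surface as a distinct OpenAIRateLimitError type, a simple backoff-and-retry wrapper can be layered on the try/catch form; a minimal sketch (the retry count and delays are arbitrary choices):

Future<CTResponse?> completeWithRetry(CompleteText request, {int retries = 3}) async {
  for (var attempt = 0; attempt < retries; attempt++) {
    try {
      return await openAI.onCompletion(request: request);
    } on OpenAIRateLimitError {
      // Give the API time to recover; rethrow after the last attempt.
      if (attempt == retries - 1) rethrow;
      await Future.delayed(Duration(seconds: 2 * (attempt + 1)));
    }
  }
  return null;
}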

Example Q&A

final request = CompleteText(
  prompt: 'What is human life expectancy in the United States?',
  model: TextDavinci3Model(),
  maxTokens: 200
);

final response = await openAI.onCompletion(request: request);
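
The answer can then be read from the last choice, as in the FutureBuilder example earlier:

print(response?.choices.last.text);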

Generate images from a prompt

Generate an image
void _generateImage() async {
  const prompt = "cat eating snake blue red.";

  final request = GenerateImage(
    model: DallE2(),
    prompt,
    1,
    size: ImageSize.size256,
    responseFormat: Format.url,
  );
  final response = await openAI.generateImage(request);
  print("img url :${response.data?.last?.url}");
}
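
With responseFormat: Format.url the result is a plain image URL, so it can be dropped straight into an Image.network widget; a small sketch that takes the URL read from response.data?.last?.url as in the snippet above:

import 'package:flutter/material.dart';

/// Shows a generated image, or a placeholder while no URL is available yet.
Widget buildGeneratedImage(String? url) {
  return url == null ? const Text('No image generated yet') : Image.network(url);
}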

Edits

Edit a prompt
void editPrompt() async {
  final response = await openAI.editor.prompt(EditRequest(
    model: CodeEditModel(),
    input: 'What day of the week is it?',
    instruction: 'Fix the spelling mistakes'
  ));

  print(response.choices.last.text);
}

Cancel generation

Cancel a generation request
_openAI
  .onChatCompletionSSE(request: request, onCancel: onCancel);

///CancelData
CancelData? mCancel;
void onCancel(CancelData cancelData) {
  mCancel = cancelData;
}

mCancel?.cancelToken.cancel("canceled ");
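
Putting the pieces together, the CancelData captured by onCancel is typically held in state and cancelled from a stop button; a brief sketch using only the calls shown above:

void startChat(ChatCompleteText request) {
  _openAI
      .onChatCompletionSSE(request: request, onCancel: onCancel)
      .listen((it) => debugPrint(it.choices.last.message?.content));
}

void stopChat() {
  // Aborts the in-flight request captured by onCancel.
  mCancel?.cancelToken.cancel('stopped by user');
  mCancel = null;
}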

Files

Get files
void getFile() async {
  final response = await openAI.file.get();
  print(response.data);
}
Upload a file
void uploadFile() async {
  final request = UploadFile(
    file: FileInfo('file-path', 'file-name'),
    purpose: 'fine-tune'
  );
  final response = await openAI.file.uploadFile(request);
  print(response);
}

Audio

Audio transcription
void audioTranscribe() async {
  final mAudio = File('mp3-path');
  final request = AudioRequest(
    file: FileInfo(mAudio.path, 'name'),
    prompt: '...'
  );

  final response = await openAI.audio.transcribes(request);
}

Fine-tuning

Create a fine-tune job
void createFineTune() async {
  final request = CreateFineTuneJob(trainingFile: 'The ID of an uploaded file');
  final response = await openAI.fineTune.createFineTuneJob(request);
}

Moderation

Create a moderation request
void createModeration() async {
  final response = await openAI.moderation.create(
    input: 'input',
    model: TextLastModerationModel()
  );
}

Models and engines

List models
final models = await openAI.listModel();

More hands-on tutorials on using the Flutter ChatGPT plugin chat_gpt_sdk_lululala are available at https://www.itying.com/category-92-b0.html

1 Reply

Sure, here is example code showing how to integrate and use the chat_gpt_sdk_lululala plugin in a Flutter project. The plugin is assumed to be a wrapper for talking to ChatGPT. Note that since it is not an official or widely adopted library, its actual API and usage may differ; the code below is only an example and may need to be adjusted to your situation.

First, make sure you have added the chat_gpt_sdk_lululala dependency to your pubspec.yaml file:

dependencies:
  flutter:
    sdk: flutter
  chat_gpt_sdk_lululala: ^<latest-version>  # replace with the actual latest version number

Then run flutter pub get to fetch the dependency.

Next, you can use the plugin in your Flutter project as follows:

import 'package:flutter/material.dart';
import 'package:chat_gpt_sdk_lululala/chat_gpt_sdk_lululala.dart'; // assuming this is the correct import path

void main() {
  runApp(MyApp());
}

class MyApp extends StatelessWidget {
  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      home: ChatScreen(),
    );
  }
}

class ChatScreen extends StatefulWidget {
  @override
  _ChatScreenState createState() => _ChatScreenState();
}

class _ChatScreenState extends State<ChatScreen> {
  final ChatGptClient _chatGptClient = ChatGptClient(apiKey: 'YOUR_API_KEY'); // replace with your API key
  TextEditingController _controller = TextEditingController();
  List<String> _chatHistory = [];

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: Text('ChatGPT Chat'),
      ),
      body: Padding(
        padding: const EdgeInsets.all(8.0),
        child: Column(
          children: [
            Expanded(
              child: ListView.builder(
                itemCount: _chatHistory.length,
                itemBuilder: (context, index) {
                  return Padding(
                    padding: const EdgeInsets.symmetric(vertical: 4.0),
                    child: Text(
                      _chatHistory[index],
                      style: TextStyle(
                        color: _chatHistory[index].startsWith('You:') ? Colors.blue : Colors.grey,
                      ),
                    ),
                  );
                },
              ),
            ),
            TextField(
              controller: _controller,
              decoration: InputDecoration(
                border: OutlineInputBorder(),
                labelText: 'Type your question',
                suffixIcon: IconButton(
                  icon: Icon(Icons.send),
                  onPressed: () async {
                    // Capture the input before clearing the text field,
                    // otherwise an empty string would be sent to the API.
                    final question = _controller.text;
                    String message = 'You: $question';
                    setState(() {
                      _chatHistory.add(message);
                      _controller.clear();
                    });

                    try {
                      ChatResponse response = await _chatGptClient.sendMessage(question);
                      setState(() {
                        _chatHistory.add('ChatGPT: ${response.message}');
                      });
                    } catch (e) {
                      setState(() {
                        _chatHistory.add('Error: $e');
                      });
                    }
                  },
                ),
              ),
            ),
          ],
        ),
      ),
    );
  }
}

// ChatGptClient and ChatResponse are assumed to look like this (the real classes may differ; check the plugin documentation)
class ChatGptClient {
  final String apiKey;

  ChatGptClient({required this.apiKey});

  Future<ChatResponse> sendMessage(String message) async {
    // This is where an HTTP request to the ChatGPT API would be sent.
    // Since the plugin's actual implementation is unknown, this is only a stub.
    // In practice you would use the http or dio package to send a POST request
    // and parse the returned JSON.
    return ChatResponse(message: 'This is a ChatGPT reply to: $message'); // example return value
  }
}

class ChatResponse {
  final String message;

  ChatResponse({required this.message});
}
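
If you write the client yourself rather than using the plugin, the stub sendMessage above can be backed by OpenAI's REST chat completions endpoint with the http package; a hedged sketch (the model name and JSON parsing follow OpenAI's public API, not chat_gpt_sdk_lululala):

import 'dart:convert';
import 'package:http/http.dart' as http;

class HttpChatGptClient {
  final String apiKey;
  HttpChatGptClient({required this.apiKey});

  Future<ChatResponse> sendMessage(String message) async {
    final res = await http.post(
      Uri.parse('https://api.openai.com/v1/chat/completions'),
      headers: {
        'Authorization': 'Bearer $apiKey',
        'Content-Type': 'application/json',
      },
      body: jsonEncode({
        'model': 'gpt-3.5-turbo',
        'messages': [
          {'role': 'user', 'content': message}
        ],
      }),
    );
    if (res.statusCode != 200) {
      throw Exception('ChatGPT API error ${res.statusCode}: ${res.body}');
    }
    final json = jsonDecode(res.body) as Map<String, dynamic>;
    // The assistant's reply lives in choices[0].message.content.
    return ChatResponse(
      message: json['choices'][0]['message']['content'] as String,
    );
  }
}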

Please note:

  1. The ChatGptClient and ChatResponse classes above are placeholders; adjust them to the actual API of the chat_gpt_sdk_lululala plugin.
  2. Keep your API key safe and do not hardcode it in client code; consider build-time variables or a secure storage service.
  3. Error handling should be more robust than simply catching and displaying the exception as done here.
  4. Since chat_gpt_sdk_lululala is not a widely known library, the code above may need to be adjusted against the actual library documentation.

Be sure to consult the plugin's official documentation or source code for the exact API usage and parameters.
