Using the Flutter OCR plugin flutter_nanonets_ocr

Posted 1 week ago by itying888, in Flutter


Description

Nanonets provides an OCR service: you can create models, train them, and use them to make OCR predictions. This package is meant to let you call the Nanonets OCR API directly, without having to hand-write the integration code in your app.

For a step-by-step guide to integrating a Nanonets OCR model into a Flutter application, see the blog post: Implementing Nanonets OCR in Flutter

Dependencies

Get started

Add the dependency

You can add flutter_nanonets_ocr as a dependency on its latest stable version with:

$ dart pub add flutter_nanonets_ocr

Or add flutter_nanonets_ocr manually to the dependencies section of your pubspec.yaml file:

dependencies:
    flutter_nanonets_ocr: ^0.0.14

Example code

Get details from a document file

import 'package:flutter_nanonets_ocr/flutter_nanonets_ocr.dart';

String apiKey = "INSERT NANONETS API KEY";

NanonetsOCR nanonetsOCR = NanonetsOCR();

FutureBuilder(
    future: nanonetsOCR.predictDocumentFile(apiKey, docImage, "INSERT OCR MODEL ID HERE", context),
    builder: (context, snapshot) {
        if (snapshot.connectionState == ConnectionState.active) {
            return loadingWidget();
        } else if (snapshot.connectionState == ConnectionState.done) {
            return yourSuccessWidget();
        } else {
            return loadingWidget();
        }
    });
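
The snippet above leaves loadingWidget() and yourSuccessWidget() as placeholders. A minimal sketch of what they might look like, assuming package:flutter/material.dart is imported and snapshot.data simply carries whatever predictDocumentFile returns:

// Placeholder widgets for the FutureBuilder above (a sketch, adapt to your own UI).
// Pass snapshot.data into yourSuccessWidget if you want to show the raw prediction.
Widget loadingWidget() => const Center(child: CircularProgressIndicator());

Widget yourSuccessWidget([Object? prediction]) => SingleChildScrollView(
      padding: const EdgeInsets.all(16),
      child: Text(prediction?.toString() ?? 'No prediction returned'),
    );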

Get details from a document URL

import 'package:flutter_nanonets_ocr/flutter_nanonets_ocr.dart';

String apiKey = "INSERT NANONETS API KEY";

NanonetsOCR nanonetsOCR = NanonetsOCR();

FutureBuilder(
    future: nanonetsOCR.predictDocumentURL(apiKey, docImageUrl, "INSERT OCR MODEL ID HERE", context),
    builder: (context, snapshot) {
        if (snapshot.connectionState == ConnectionState.active) {
            return loadingWidget();
        } else if (snapshot.connectionState == ConnectionState.done) {
            return yourSuccessWidget();
        } else {
            return loadingWidget();
        }
    });
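
For reference, docImage in the first snippet is a local File, while docImageUrl is a plain String pointing to a hosted image; the sample values below are hypothetical and only for illustration:

// Hypothetical inputs for the two prediction calls above.
File docImage = File('/path/to/passport.jpg');                   // local image file
String docImageUrl = 'https://example.com/images/passport.jpg';  // publicly reachable image URL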

Complete example demo

Below is a complete sample app showing how to use the flutter_nanonets_ocr plugin in Flutter:

// ignore_for_file: prefer_const_constructors

import 'dart:developer';
import 'dart:io';
import 'package:flutter/material.dart';
import 'package:image_picker/image_picker.dart';
import 'package:flutter_nanonets_ocr/flutter_nanonets_ocr.dart';

void main() {
  runApp(const MyApp());
}

class MyApp extends StatelessWidget {
  const MyApp({super.key});

  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      debugShowCheckedModeBanner: false,
      title: 'Flutter Demo',
      theme: ThemeData(
        colorScheme: ColorScheme.fromSeed(seedColor: Colors.deepPurple),
        useMaterial3: true,
      ),
      home: const HomePage(),
    );
  }
}

class HomePage extends StatefulWidget {
  const HomePage({super.key});

  @override
  State<HomePage> createState() => _HomePageState();
}

class _HomePageState extends State<HomePage> {
  File? pickedImage;
  final String apiKey = "YOUR_API_KEY";
  final String modelId = "YOUR_MODEL_ID";

  Future<File?> pickImage(ImageSource imageType) async {
    try {
      final photo = await ImagePicker().pickImage(source: imageType, imageQuality: 100);
      if (photo == null) return null;
      File tempImage = File(photo.path);
      setState(() {
        pickedImage = tempImage;
      });
    } catch (error) {
      log(error.toString());
    }
    return pickedImage;
  }

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      body: Container(
        alignment: Alignment.center,
        child: Column(
          mainAxisAlignment: MainAxisAlignment.center,
          crossAxisAlignment: CrossAxisAlignment.center,
          children: [
            Text(
              "Home OCR Predictor",
              style: TextStyle(color: Colors.black, fontWeight: FontWeight.bold, fontSize: 20),
            ),
            pickedImage == null
                ? Container()
                : Container(
                    padding: EdgeInsets.symmetric(horizontal: 5),
                    decoration: BoxDecoration(shape: BoxShape.rectangle),
                    child: ClipRRect(
                      borderRadius: BorderRadius.circular(8),
                      child: Image.file(
                        pickedImage!,
                        fit: BoxFit.contain,
                      ),
                    ),
                  ),
            SizedBox(height: 40),
            InkWell(
              onTap: () async {
                try {
                  await pickImage(ImageSource.gallery);
                  if (pickedImage != null && context.mounted) {
                    Navigator.of(context).push(
                      MaterialPageRoute(
                        builder: (context) => ReceiptPredictionScreen(image: pickedImage!),
                      ),
                    );
                  }
                } catch (err) {
                  log(err.toString());
                }
              },
              child: Container(
                height: 70,
                width: 300,
                alignment: Alignment.center,
                decoration: BoxDecoration(
                    color: Colors.white,
                    border: Border.all(color: Colors.black, width: 2),
                    borderRadius: BorderRadius.all(Radius.circular(30))),
                child: Text(
                  "Upload Passport Image",
                  textAlign: TextAlign.center,
                  style: TextStyle(color: Colors.black, fontSize: 20, fontWeight: FontWeight.bold),
                ),
              ),
            ),
          ],
        ),
      ),
    );
  }
}

class ReceiptPredictionScreen extends StatefulWidget {
  final File? image;

  const ReceiptPredictionScreen({super.key, required this.image});

  @override
  State<ReceiptPredictionScreen> createState() => _ReceiptPredictionScreenState();
}

class _ReceiptPredictionScreenState extends State<ReceiptPredictionScreen> {
  String result = '';
  // Needed here for the prediction call below; replace with your own values.
  final String apiKey = "YOUR_API_KEY";
  final String modelId = "YOUR_MODEL_ID";

  @override
  void initState() {
    super.initState();
    _predictOCROnImage(widget.image);
  }

  Future<void> _predictOCROnImage(File? image) async {
    if (image == null) return;

    NanonetsOCR nanonetsOCR = NanonetsOCR();
    final prediction = await nanonetsOCR.predictDocumentFile(apiKey, image, modelId, context);

    if (!mounted) return;
    setState(() {
      result = prediction.toString();
    });
  }

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(title: Text('OCR Prediction Result')),
      body: Center(
        child: Text(result.isNotEmpty ? result : 'Loading...'),
      ),
    );
  }
}

In this example we first pick an image from the gallery, then run OCR on it with the flutter_nanonets_ocr plugin and display the recognition result. Make sure to replace "YOUR_API_KEY" and "YOUR_MODEL_ID" with your own Nanonets API key and model ID.
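
The demo above defines the API key and model ID in both screens. A cleaner variation (just a sketch, not part of the plugin API) is to pass them from HomePage into ReceiptPredictionScreen through its constructor:

// Sketch: forward the credentials instead of redefining them in each screen.
class ReceiptPredictionScreen extends StatefulWidget {
  final File image;
  final String apiKey;
  final String modelId;

  const ReceiptPredictionScreen({
    super.key,
    required this.image,
    required this.apiKey,
    required this.modelId,
  });

  @override
  State<ReceiptPredictionScreen> createState() => _ReceiptPredictionScreenState();
}

// The State class then reads widget.apiKey and widget.modelId when calling
// predictDocumentFile, and HomePage passes its own constants when navigating.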


More hands-on tutorials in this series on using the Flutter OCR plugin flutter_nanonets_ocr are available at https://www.itying.com/category-92-b0.html

1 Reply



Sure. Here is how to integrate and use the flutter_nanonets_ocr plugin for OCR (optical character recognition) in a Flutter project.

1. Add the dependency

First, add the flutter_nanonets_ocr dependency to your pubspec.yaml file:

dependencies:
  flutter:
    sdk: flutter
  flutter_nanonets_ocr: ^0.0.14  # replace with the actual latest version

Then run flutter pub get to install the dependency.

2. Configure the API key

Before using flutter_nanonets_ocr you need an API key from Nanonets. After signing up and logging in to Nanonets, you can create an application and obtain the API key.
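
Rather than hardcoding the key in source, one option is to inject it at build time with a compile-time define; a minimal sketch using Dart's built-in String.fromEnvironment (the define name NANONETS_API_KEY is just an example):

// Pass the key when running or building the app:
//   flutter run --dart-define=NANONETS_API_KEY=your_key_here
const String apiKey =
    String.fromEnvironment('NANONETS_API_KEY', defaultValue: '');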

3. Write the OCR code

Below is a complete Flutter example that uses the flutter_nanonets_ocr plugin to recognize text in an image:

import 'package:flutter/material.dart';
import 'package:flutter_nanonets_ocr/flutter_nanonets_ocr.dart';
import 'dart:io';
import 'package:image_picker/image_picker.dart';

void main() {
  runApp(MyApp());
}

class MyApp extends StatelessWidget {
  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      title: 'Flutter OCR Demo',
      theme: ThemeData(
        primarySwatch: Colors.blue,
      ),
      home: OcrScreen(),
    );
  }
}

class OcrScreen extends StatefulWidget {
  @override
  _OcrScreenState createState() => _OcrScreenState();
}

class _OcrScreenState extends State<OcrScreen> {
  String ocrResult = '';
  final ImagePicker _picker = ImagePicker();
  final NanonetsOCR _ocr = NanonetsOCR();
  final String apiKey = 'YOUR_NANONETS_API_KEY'; // replace with your Nanonets API key
  final String modelId = 'YOUR_MODEL_ID'; // replace with your OCR model ID

  Future<void> _pickImage() async {
    final XFile? image = await _picker.pickImage(source: ImageSource.gallery);
    if (image != null) {
      File file = File(image.path);
      _performOcr(file);
    }
  }

  Future<void> _performOcr(File imageFile) async {
    try {
      // predictDocumentFile uploads the image file to your Nanonets OCR model
      // and returns its prediction (same call as in the snippets above).
      final prediction =
          await _ocr.predictDocumentFile(apiKey, imageFile, modelId, context);
      if (!mounted) return;
      setState(() {
        ocrResult = prediction.toString();
      });
    } catch (e) {
      debugPrint('OCR error: $e');
      if (!mounted) return;
      setState(() {
        ocrResult = 'OCR recognition failed: $e';
      });
    }
  }

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: Text('Flutter OCR Demo'),
      ),
      body: Center(
        child: Column(
          mainAxisAlignment: MainAxisAlignment.center,
          children: <Widget>[
            ElevatedButton(
              onPressed: _pickImage,
              child: Text('Pick an image for OCR'),
            ),
            SizedBox(height: 20),
            Text(
              ocrResult,
              style: TextStyle(fontSize: 18),
              textAlign: TextAlign.center,
            ),
          ],
        ),
      ),
    );
  }
}

4. Add the image_picker dependency

Since the example uses the image_picker plugin to pick images, you also need to add it to your pubspec.yaml file:

dependencies:
  flutter:
    sdk: flutter
  flutter_nanonets_ocr: ^0.0.14  # replace with the actual latest version
  image_picker: ^latest_version  # replace with the actual latest version

Then run flutter pub get again to install the dependencies.

5. Run the app

Make sure the API key is configured correctly, then run your Flutter app. Tap the button to pick an image; the app will run OCR on it with the flutter_nanonets_ocr plugin and display the result on screen.

This example shows how to integrate and use the flutter_nanonets_ocr plugin for OCR in a Flutter app. You can customize and extend it to suit your needs.
