diff --git a/.gitignore b/.gitignore
index 16871cc8..f63fe0df 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,3 +26,4 @@ local.properties
# Visual Studio Code
.vscode/
+.gradle
diff --git a/README.md b/README.md
index 40d3dabe..38c21934 100644
--- a/README.md
+++ b/README.md
@@ -1,263 +1,207 @@
-![CircleCI branch][circle-ci-badge]
-[![npm][npm]][npm-url]
+# React Native Voice
-
-<h1 align="center">React Native Voice</h1>
-A speech-to-text library for React Native.
+[![npm version](https://img.shields.io/npm/v/@dev-amirzubair/react-native-voice.svg?style=flat-square)](https://npmjs.com/package/@dev-amirzubair/react-native-voice)
-
-
+A speech-to-text library for [React Native](https://reactnative.dev/) with **New Architecture (Fabric/TurboModules)** and **Bridgeless mode** support.
-```sh
-yarn add @react-native-voice/voice
-
-# or
-
-npm i @react-native-voice/voice --save
-```
-
-Link the iOS package
-
-```sh
-npx pod-install
-```
+> **Note:** This library is a fork of [@react-native-voice/voice](https://github.com/react-native-voice/voice) with custom fixes for React Native 0.76+ and the New Architecture. [View source on GitHub](https://github.com/dev-amirzubair/voice)
-## Table of contents
+## What's Different?
-- [Linking](#linking)
- - [Manually Link Android](#manually-link-android)
- - [Manually Link iOS](#manually-link-ios)
-- [Prebuild Plugin](#prebuild-plugin)
-- [Usage](#usage)
- - [Example](#example)
-- [API](#api)
-- [Events](#events)
-- [Permissions](#permissions)
- - [Android](#android)
- - [iOS](#ios)
-- [Contributors](#contributors)
+This fork includes the following improvements over the original library:
-<h2 align="center">Linking</h2>
+- ✅ **New Architecture Support** - Works with Fabric and TurboModules
+- ✅ **Bridgeless Mode** - Full support for React Native's Bridgeless mode
+- ✅ **React Native 0.76+** - Tested and working with the latest RN versions
+- ✅ **Fixed Android Event Emission** - Events properly reach JavaScript in new architecture
+- ✅ **Fixed iOS TurboModule Registration** - Proper fallback handling for iOS
+- ✅ **Improved Locale Handling** - Short language codes are normalized to full locales (e.g. `ur` -> `ur-PK`, `id` -> `id-ID`)
+- ✅ **Clean TypeScript Types** - Updated type definitions
-Manually or automatically link the NativeModule
+## Installation
```sh
-react-native link @react-native-voice/voice
-```
-
-### Manually Link Android
+yarn add @dev-amirzubair/react-native-voice
-- In `android/setting.gradle`
+# or
-```gradle
-...
-include ':@react-native-voice_voice', ':app'
-project(':@react-native-voice_voice').projectDir = new File(rootProject.projectDir, '../node_modules/@react-native-voice/voice/android')
+npm install @dev-amirzubair/react-native-voice --save
```
-- In `android/app/build.gradle`
+### iOS Setup
-```gradle
-...
-dependencies {
- ...
- compile project(':@react-native-voice_voice')
-}
+```sh
+cd ios && pod install
```
-- In `MainApplication.java`
-
-```java
-
-import android.app.Application;
-import com.facebook.react.ReactApplication;
-import com.facebook.react.ReactPackage;
-...
-import com.wenkesj.voice.VoicePackage; // <------ Add this!
-...
-
-public class MainActivity extends Activity implements ReactApplication {
-...
- @Override
-  protected List<ReactPackage> getPackages() {
- return Arrays.asList(
- new MainReactPackage(),
- new VoicePackage() // <------ Add this!
- );
- }
-}
-```
+### Android Setup
-### Manually Link iOS
+No additional setup required - autolinking handles everything.
-- Drag the Voice.xcodeproj from the @react-native-voice/voice/ios folder to the Libraries group on Xcode in your poject. [Manual linking](https://reactnative.dev/docs/linking-libraries-ios.html)
+## Usage
-- Click on your main project file (the one that represents the .xcodeproj) select Build Phases and drag the static library, lib.Voice.a, from the Libraries/Voice.xcodeproj/Products folder to Link Binary With Libraries
+```javascript
+import Voice from '@dev-amirzubair/react-native-voice';
-<h2 align="center">Prebuild Plugin</h2>
+// Set up event handlers
+Voice.onSpeechStart = () => console.log('Speech started');
+Voice.onSpeechEnd = () => console.log('Speech ended');
+Voice.onSpeechResults = (e) => console.log('Results:', e.value);
+Voice.onSpeechPartialResults = (e) => console.log('Partial:', e.value);
+Voice.onSpeechError = (e) => console.log('Error:', e.error);
-> This package cannot be used in the "Expo Go" app because [it requires custom native code](https://docs.expo.io/workflow/customizing/).
+// Start listening
+await Voice.start('en-US');
-After installing this npm package, add the [config plugin](https://docs.expo.io/guides/config-plugins/) to the [`plugins`](https://docs.expo.io/versions/latest/config/app/#plugins) array of your `app.json` or `app.config.js`:
+// Stop listening
+await Voice.stop();
-```json
-{
- "expo": {
- "plugins": ["@react-native-voice/voice"]
- }
-}
+// Clean up
+await Voice.destroy();
```
-Next, rebuild your app as described in the ["Adding custom native code"](https://docs.expo.io/workflow/customizing/) guide.
-
-### Props
-
-The plugin provides props for extra customization. Every time you change the props or plugins, you'll need to rebuild (and `prebuild`) the native app. If no extra properties are added, defaults will be used.
+### Full Example
-- `speechRecognition` (_string | false_): Sets the message for the `NSSpeechRecognitionUsageDescription` key in the `Info.plist` message. When undefined, a default permission message will be used. When `false`, the permission will be skipped.
-- `microphone` (_string | false_): Sets the message for the `NSMicrophoneUsageDescription` key in the `Info.plist`. When undefined, a default permission message will be used. When `false`, the `android.permission.RECORD_AUDIO` will not be added to the `AndroidManifest.xml` and the iOS permission will be skipped.
-
-### Example
+```javascript
+import React, { useEffect, useState, useCallback } from 'react';
+import { View, Text, Button } from 'react-native';
+import Voice from '@dev-amirzubair/react-native-voice';
+
+function SpeechToText() {
+ const [results, setResults] = useState([]);
+ const [isListening, setIsListening] = useState(false);
+
+ useEffect(() => {
+ Voice.onSpeechStart = () => setIsListening(true);
+ Voice.onSpeechEnd = () => setIsListening(false);
+ Voice.onSpeechResults = (e) => setResults(e.value ?? []);
+ Voice.onSpeechError = (e) => console.error(e.error);
+
+ return () => {
+ Voice.destroy().then(Voice.removeAllListeners);
+ };
+ }, []);
+
+ const startListening = async () => {
+ try {
+ await Voice.start('en-US');
+ } catch (e) {
+ console.error(e);
+ }
+ };
-```json
-{
- "plugins": [
- [
- "@react-native-voice/voice",
- {
- "microphonePermission": "CUSTOM: Allow $(PRODUCT_NAME) to access the microphone",
- "speechRecognitionPermission": "CUSTOM: Allow $(PRODUCT_NAME) to securely recognize user speech"
- }
- ]
- ]
+ const stopListening = async () => {
+ try {
+ await Voice.stop();
+ } catch (e) {
+ console.error(e);
+ }
+ };
+
+  return (
+    <View>
+      <Text>{isListening ? '🎤 Listening...' : 'Press Start'}</Text>
+      <Text>{results.join(' ')}</Text>
+      <Button title="Start" onPress={startListening} />
+      <Button title="Stop" onPress={stopListening} />
+    </View>
+  );
}
```
-<h2 align="center">Usage</h2>
+## API
+
+| Method | Description | Platform |
+|--------|-------------|----------|
+| `Voice.isAvailable()` | Check if speech recognition is available | Android, iOS |
+| `Voice.start(locale)` | Start listening for speech | Android, iOS |
+| `Voice.stop()` | Stop listening | Android, iOS |
+| `Voice.cancel()` | Cancel speech recognition | Android, iOS |
+| `Voice.destroy()` | Destroy the recognizer instance | Android, iOS |
+| `Voice.removeAllListeners()` | Remove all event listeners | Android, iOS |
+| `Voice.isRecognizing()` | Check if currently recognizing | Android, iOS |
+| `Voice.getSpeechRecognitionServices()` | Get available speech engines | Android only |
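+
+For example, a quick capability check before offering the feature (a minimal sketch; `checkSpeechSupport` is an illustrative helper, and `isAvailable()` resolves truthy when a recognizer is present):
+
+```javascript
+import { Platform } from 'react-native';
+import Voice from '@dev-amirzubair/react-native-voice';
+
+async function checkSpeechSupport() {
+  // Resolves truthy when the OS exposes a speech recognizer
+  const available = await Voice.isAvailable();
+  if (!available) {
+    return false;
+  }
+  // Android only: log the installed recognition engines
+  if (Platform.OS === 'android') {
+    const services = await Voice.getSpeechRecognitionServices();
+    console.log('Recognition engines:', services);
+  }
+  return true;
+}
+```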
+
+## Events
+
+| Event | Description | Data |
+|-------|-------------|------|
+| `onSpeechStart` | Speech recognition started | `{ error: false }` |
+| `onSpeechEnd` | Speech recognition ended | `{ error: false }` |
+| `onSpeechResults` | Final results received | `{ value: ['recognized text'] }` |
+| `onSpeechPartialResults` | Partial results (live) | `{ value: ['partial text'] }` |
+| `onSpeechError` | Error occurred | `{ error: { code, message } }` |
+| `onSpeechVolumeChanged` | Volume/pitch changed | `{ value: number }` |
+| `onSpeechRecognized` | Speech was recognized | `{ isFinal: boolean }` |
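+
+The payloads follow the shapes above, so an error handler can branch on the code (sketch):
+
+```javascript
+Voice.onSpeechError = (e) => {
+  // e.error carries the { code, message } shape from the table
+  const { code, message } = e.error ?? {};
+  console.warn(`Speech error ${code}: ${message}`);
+};
+```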
+
+## Permissions
-Full example for Android and iOS.
+### Android
-### Example
+Add to `AndroidManifest.xml`:
-```javascript
-import Voice from '@react-native-voice/voice';
-import React, {Component} from 'react';
-
-class VoiceTest extends Component {
- constructor(props) {
- Voice.onSpeechStart = this.onSpeechStartHandler.bind(this);
- Voice.onSpeechEnd = this.onSpeechEndHandler.bind(this);
- Voice.onSpeechResults = this.onSpeechResultsHandler.bind(this);
- }
- onStartButtonPress(e){
- Voice.start('en-US');
- }
- ...
-}
+```xml
+<uses-permission android:name="android.permission.RECORD_AUDIO" />
```
-<h2 align="center">API</h2>
+The library automatically requests permission when starting recognition.
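+
+If you prefer to request the permission yourself (for example, to show a rationale first), React Native's built-in `PermissionsAndroid` works alongside this library; `ensureMicPermission` below is an illustrative helper:
+
+```javascript
+import { PermissionsAndroid, Platform } from 'react-native';
+
+async function ensureMicPermission() {
+  if (Platform.OS !== 'android') return true;
+  // Runtime prompt, required since Android 6.0
+  const status = await PermissionsAndroid.request(
+    PermissionsAndroid.PERMISSIONS.RECORD_AUDIO,
+  );
+  return status === PermissionsAndroid.RESULTS.GRANTED;
+}
+```
+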
-Static access to the Voice API.
+### iOS
-**All methods _now_ return a `new Promise` for `async/await` compatibility.**
+Add to `Info.plist`:
-| Method Name | Description | Platform |
-| ------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ |
-| Voice.isAvailable() | Checks whether a speech recognition service is available on the system. | Android, iOS |
-| Voice.start(locale) | Starts listening for speech for a specific locale. Returns null if no error occurs. | Android, iOS |
-| Voice.stop() | Stops listening for speech. Returns null if no error occurs. | Android, iOS |
-| Voice.cancel() | Cancels the speech recognition. Returns null if no error occurs. | Android, iOS |
-| Voice.destroy() | Destroys the current SpeechRecognizer instance. Returns null if no error occurs. | Android, iOS |
-| Voice.removeAllListeners() | Cleans/nullifies overridden `Voice` static methods. | Android, iOS |
-| Voice.isRecognizing() | Return if the SpeechRecognizer is recognizing. | Android, iOS |
-| Voice.getSpeechRecognitionServices() | Returns a list of the speech recognition engines available on the device. (Example: `['com.google.android.googlequicksearchbox']` if Google is the only one available.) | Android |
+```xml
+<key>NSMicrophoneUsageDescription</key>
+<string>This app needs microphone access for speech recognition</string>
+<key>NSSpeechRecognitionUsageDescription</key>
+<string>This app needs speech recognition access</string>
+```
-<h2 align="center">Events</h2>
+## Platform Notes
-Callbacks that are invoked when a native event emitted.
+### Android
+- Auto-stops after user stops speaking
+- Requires Google Search app for speech recognition on most devices
+- Check available services with `Voice.getSpeechRecognitionServices()`
-| Event Name | Description | Event | Platform |
-| ----------------------------------- | ------------------------------------------------------ | ----------------------------------------------- | ------------ |
-| Voice.onSpeechStart(event) | Invoked when `.start()` is called without error. | `{ error: false }` | Android, iOS |
-| Voice.onSpeechRecognized(event) | Invoked when speech is recognized. | `{ error: false }` | Android, iOS |
-| Voice.onSpeechEnd(event) | Invoked when SpeechRecognizer stops recognition. | `{ error: false }` | Android, iOS |
-| Voice.onSpeechError(event) | Invoked when an error occurs. | `{ error: Description of error as string }` | Android, iOS |
-| Voice.onSpeechResults(event) | Invoked when SpeechRecognizer is finished recognizing. | `{ value: [..., 'Speech recognized'] }` | Android, iOS |
-| Voice.onSpeechPartialResults(event) | Invoked when any results are computed. | `{ value: [..., 'Partial speech recognized'] }` | Android, iOS |
-| Voice.onSpeechVolumeChanged(event) | Invoked when pitch that is recognized changed. | `{ value: pitch in dB }` | Android |
+### iOS
+- Does NOT auto-stop - call `Voice.stop()` when done
+- Speech recognition only works on **physical devices** (not simulators)
+- Requires iOS 10+
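+
+Because iOS keeps listening until told otherwise, a common pattern is to stop after a stretch of silence. A minimal sketch (the 1.5 s threshold is an arbitrary choice, and this overwrites any existing `onSpeechPartialResults` handler):
+
+```javascript
+let silenceTimer = null;
+
+Voice.onSpeechPartialResults = () => {
+  // Restart the countdown whenever new speech arrives
+  if (silenceTimer) clearTimeout(silenceTimer);
+  silenceTimer = setTimeout(() => {
+    Voice.stop(); // no new words for 1.5 s, assume the user is done
+  }, 1500);
+};
+```
+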
-<h2 align="center">Permissions</h2>
+## Expo Support
-Arguably the most important part.
+This library requires custom native code and cannot be used with Expo Go. Use a [development build](https://docs.expo.dev/develop/development-builds/introduction/) or eject.
-### Android
+Add to your `app.json`:
-While the included `VoiceTest` app works without explicit permissions checks and requests, it may be necessary to add a permission request for `RECORD_AUDIO` for some configurations.
-Since Android M (6.0), [user need to grant permission at runtime (and not during app installation)](https://developer.android.com/training/permissions/requesting.html).
-By default, calling the `startSpeech` method will invoke `RECORD AUDIO` permission popup to the user. This can be disabled by passing `REQUEST_PERMISSIONS_AUTO: true` in the options argument.
-
-If you're running an ejected expo/expokit app, you may run into issues with permissions on Android and get the following error `host.exp.exponent.MainActivity cannot be cast to com.facebook.react.ReactActivity startSpeech`. This can be resolved by prompting for permssion using the `expo-permission` package before starting recognition.
-
-```js
-import { Permissions } from "expo";
-async componentDidMount() {
- const { status, expires, permissions } = await Permissions.askAsync(
- Permissions.AUDIO_RECORDING
- );
- if (status !== "granted") {
- //Permissions not granted. Don't show the start recording button because it will cause problems if it's pressed.
- this.setState({showRecordButton: false});
- } else {
- this.setState({showRecordButton: true});
- }
+```json
+{
+ "expo": {
+ "plugins": ["@dev-amirzubair/react-native-voice"]
+ }
}
```
-**Notes on Android**
-
-Even after all the permissions are correct in Android, there is one last thing to make sure this libray is working fine on Android. Please make sure the device has Google Speech Recognizing Engine such as `com.google.android.googlequicksearchbox` by calling `Voice.getSpeechRecognitionServices()`. Since Android phones can be configured with so many options, even if a device has googlequicksearchbox engine, it could be configured to use other services. You can check which serivce is used for Voice Assistive App in following steps for most Android phones:
-
-`Settings > App Management > Default App > Assistive App and Voice Input > Assistive App`
+## Troubleshooting
-Above flow can vary depending on the Android models and manufactures. For Huawei phones, there might be a chance that the device cannot install Google Services.
+### Android: No speech recognition services found
+Install the [Google Search app](https://play.google.com/store/apps/details?id=com.google.android.googlequicksearchbox) from Play Store.
-**How can I get `com.google.android.googlequicksearchbox` in the device?**
+### iOS: Speech recognition not working on simulator
+Use a physical iOS device - simulators don't support speech recognition.
-Please ask users to install [Google Search App](https://play.google.com/store/apps/details?id=com.google.android.googlequicksearchbox&hl=en).
+### Events not firing
+Make sure you set up event handlers **before** calling `Voice.start()`.
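+
+A correct ordering looks like this (sketch):
+
+```javascript
+Voice.onSpeechResults = (e) => console.log(e.value); // register handlers first
+await Voice.start('en-US'); // then start, so no early events are missed
+```
+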
-### iOS
-
-Need to include permissions for `NSMicrophoneUsageDescription` and `NSSpeechRecognitionUsageDescription` inside Info.plist for iOS. See the included `VoiceTest` for how to handle these cases.
+## Credits
-```xml
-<dict>
-  ...
-  <key>NSMicrophoneUsageDescription</key>
-  <string>Description of why you require the use of the microphone</string>
-  <key>NSSpeechRecognitionUsageDescription</key>
-  <string>Description of why you require the use of the speech recognition</string>
-  ...
-</dict>
-```
+This library is based on [@react-native-voice/voice](https://github.com/react-native-voice/voice) by the React Native Voice contributors. Special thanks to:
-Please see the documentation provided by ReactNative for this: [PermissionsAndroid](https://reactnative.dev/docs/permissionsandroid.html)
-
-[npm]: https://img.shields.io/npm/v/@react-native-voice/voice.svg?style=flat-square
-[npm-url]: https://npmjs.com/package/@react-native-voice/voice
-[circle-ci-badge]: https://img.shields.io/circleci/project/github/react-native-voice/voice/master.svg?style=flat-square
+- @wenkesj (original author)
+- @jamsch
+- All original contributors
-<h2 align="center">Contributors</h2>
+## License
-- @asafron
-- @BrendanFDMoore
-- @brudny
-- @chitezh
-- @ifsnow
-- @jamsch
-- @misino
-- @Noitidart
-- @ohtangza & @hayanmind
-- @rudiedev6
-- @tdonia
-- @wenkesj
+MIT
diff --git a/android/build.gradle b/android/build.gradle
index 1f9c6f56..784a3af8 100644
--- a/android/build.gradle
+++ b/android/build.gradle
@@ -50,12 +50,6 @@ def supportsNamespace() {
android {
if (supportsNamespace()) {
namespace "com.wenkesj.voice"
-
- sourceSets {
- main {
- manifest.srcFile "src/main/AndroidManifestNew.xml"
- }
- }
}
compileSdkVersion getExtOrIntegerDefault("compileSdkVersion")
@@ -115,6 +109,7 @@ dependencies {
//noinspection GradleDynamicVersion
implementation "com.facebook.react:react-native:+"
implementation "org.jetbrains.kotlin:kotlin-stdlib:$kotlin_version"
+ implementation "androidx.core:core-ktx:1.9.0"
}
if (isNewArchitectureEnabled()) {
diff --git a/android/gradle.properties b/android/gradle.properties
index b3510205..ff2af63e 100644
--- a/android/gradle.properties
+++ b/android/gradle.properties
@@ -1,5 +1,5 @@
-Voice_kotlinVersion=1.7.0
-Voice_minSdkVersion=21
-Voice_targetSdkVersion=31
-Voice_compileSdkVersion=31
-Voice_ndkversion=21.4.7075529
+Voice_kotlinVersion=1.9.24
+Voice_minSdkVersion=24
+Voice_targetSdkVersion=34
+Voice_compileSdkVersion=35
+Voice_ndkversion=26.1.10909125
diff --git a/android/src/main/VoiceSpec.kt b/android/src/main/VoiceSpec.kt
deleted file mode 100644
index 3f0c1ce6..00000000
--- a/android/src/main/VoiceSpec.kt
+++ /dev/null
@@ -1,55 +0,0 @@
-package com.wenkesj.voice
-
-import com.facebook.react.bridge.Callback
-import com.facebook.react.bridge.Promise
-import com.facebook.react.bridge.ReactApplicationContext
-import com.facebook.react.bridge.ReadableMap
-
-abstract class VoiceSpec internal constructor(context: ReactApplicationContext) :
- NativeVoiceAndroidSpec(context) {
- private val voice = Voice(context)
-
- override fun destroySpeech(callback: Callback) {
- voice.destroySpeech(callback)
- }
-
- override fun startSpeech(locale: String, opts: ReadableMap, callback: Callback) {
- voice.startSpeech(locale,opts,callback)
- }
-
- override fun stopSpeech(callback: Callback) {
- voice.stopSpeech(callback)
- }
-
- override fun cancelSpeech(callback: Callback) {
- voice.cancelSpeech(callback)
- }
-
- override fun isSpeechAvailable(callback: Callback) {
- voice.isSpeechAvailable(callback)
- }
-
- override fun getSpeechRecognitionServices(promise: Promise) {
- voice.getSpeechRecognitionServices(promise)
- }
-
- override fun isRecognizing(callback: Callback) {
- voice.isRecognizing(callback)
- }
-
- override fun addListener(eventType: String) {
-
- }
-
- override fun removeListeners(count: Double) {
-
- }
-
- override fun getName(): String {
- return NAME
- }
-
- companion object {
- const val NAME = "Voice"
- }
-}
diff --git a/android/src/main/java/com/wenkesj/voice/Voice.kt b/android/src/main/java/com/wenkesj/voice/Voice.kt
index 58ba07b4..847e0087 100644
--- a/android/src/main/java/com/wenkesj/voice/Voice.kt
+++ b/android/src/main/java/com/wenkesj/voice/Voice.kt
@@ -13,6 +13,7 @@ import android.speech.RecognizerIntent
import android.speech.SpeechRecognizer
import android.util.Log
import androidx.annotation.Nullable
+import androidx.core.content.ContextCompat
import com.facebook.react.bridge.Arguments
import com.facebook.react.bridge.Callback
import com.facebook.react.bridge.Promise
@@ -24,25 +25,67 @@ import com.facebook.react.modules.core.PermissionAwareActivity
import java.util.Locale
-class Voice (context:ReactApplicationContext):RecognitionListener {
+/**
+ * Voice - Android Speech Recognition implementation
+ * Handles speech-to-text using Android's SpeechRecognizer API
+ * Events are emitted via RCTDeviceEventEmitter to JavaScript
+ */
+class Voice(context: ReactApplicationContext) : RecognitionListener {
val reactContext: ReactApplicationContext = context
private var speech: SpeechRecognizer? = null
private var isRecognizing = false
private var locale: String? = null
private fun getLocale(locale: String?): String {
- if (locale != null && locale != "") {
- return locale
+ if (locale != null && locale.isNotEmpty()) {
+ // Normalize locale format (e.g., "id" -> "id-ID", "ur" -> "ur-PK")
+ val normalizedLocale = normalizeLocale(locale)
+ Log.d("ASR", "Using provided locale: $locale -> $normalizedLocale")
+ return normalizedLocale
}
- return Locale.getDefault().toString()
+ val defaultLocale = Locale.getDefault().toString()
+ Log.d("ASR", "Using default locale: $defaultLocale")
+ return defaultLocale
+ }
+
+ private fun normalizeLocale(locale: String): String {
+ // Handle common locale formats
+ val parts = locale.split("-", "_")
+ val language = parts[0].lowercase()
+
+ // Map common language codes to full locale codes
+ val localeMap = mapOf(
+ "id" to "id-ID", // Indonesian
+ "in" to "id-ID", // Indonesian (alternative)
+ "ur" to "ur-PK", // Urdu (Pakistan)
+ "hi" to "hi-IN", // Hindi
+ "en" to "en-US", // English (default to US)
+ "ms" to "ms-MY", // Malay (Malaysia)
+ "ar" to "ar-SA", // Arabic
+ "bn" to "bn-BD", // Bengali
+ )
+
+ // If it's already in format "lang-COUNTRY", return as is
+ if (parts.size >= 2) {
+ return locale.replace("_", "-")
+ }
+
+ // Otherwise, try to map it
+ return localeMap[language] ?: locale
}
private fun startListening(opts: ReadableMap) {
+ Log.d("ASR", "startListening called with locale: ${this.locale}")
if (speech != null) {
speech?.destroy()
speech = null
}
+ // Check if speech recognition is available
+ if (!SpeechRecognizer.isRecognitionAvailable(this.reactContext)) {
+ throw Exception("Speech recognition is not available on this device")
+ }
+
speech = if (opts.hasKey("RECOGNIZER_ENGINE")) {
when (opts.getString("RECOGNIZER_ENGINE")) {
"GOOGLE" -> {
@@ -58,30 +101,42 @@ class Voice (context:ReactApplicationContext):RecognitionListener {
SpeechRecognizer.createSpeechRecognizer(this.reactContext)
}
+ if (speech == null) {
+ throw Exception("Failed to create SpeechRecognizer")
+ }
+
speech?.setRecognitionListener(this)
+ Log.d("ASR", "startListening() - RecognitionListener set")
val intent = Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH)
+ Log.d("ASR", "startListening() - Intent created")
+
+    // Track whether the JS options explicitly set a language model
+ var languageModelSet = false
// Load the intent with options from JS
val iterator = opts.keySetIterator()
while (iterator.hasNextKey()) {
val key = iterator.nextKey()
when (key) {
- "EXTRA_LANGUAGE_MODEL" -> when (opts.getString(key)) {
- "LANGUAGE_MODEL_FREE_FORM" -> intent.putExtra(
- RecognizerIntent.EXTRA_LANGUAGE_MODEL,
- RecognizerIntent.LANGUAGE_MODEL_FREE_FORM
- )
-
- "LANGUAGE_MODEL_WEB_SEARCH" -> intent.putExtra(
- RecognizerIntent.EXTRA_LANGUAGE_MODEL,
- RecognizerIntent.LANGUAGE_MODEL_WEB_SEARCH
- )
-
- else -> intent.putExtra(
- RecognizerIntent.EXTRA_LANGUAGE_MODEL,
- RecognizerIntent.LANGUAGE_MODEL_FREE_FORM
- )
+ "EXTRA_LANGUAGE_MODEL" -> {
+ languageModelSet = true
+ when (opts.getString(key)) {
+ "LANGUAGE_MODEL_FREE_FORM" -> intent.putExtra(
+ RecognizerIntent.EXTRA_LANGUAGE_MODEL,
+ RecognizerIntent.LANGUAGE_MODEL_FREE_FORM
+ )
+
+ "LANGUAGE_MODEL_WEB_SEARCH" -> intent.putExtra(
+ RecognizerIntent.EXTRA_LANGUAGE_MODEL,
+ RecognizerIntent.LANGUAGE_MODEL_WEB_SEARCH
+ )
+
+ else -> intent.putExtra(
+ RecognizerIntent.EXTRA_LANGUAGE_MODEL,
+ RecognizerIntent.LANGUAGE_MODEL_FREE_FORM
+ )
+ }
}
"EXTRA_MAX_RESULTS" -> {
@@ -115,21 +170,48 @@ class Voice (context:ReactApplicationContext):RecognitionListener {
}
}
}
+
+ // Set default language model if not specified
+ if (!languageModelSet) {
+ intent.putExtra(
+ RecognizerIntent.EXTRA_LANGUAGE_MODEL,
+ RecognizerIntent.LANGUAGE_MODEL_FREE_FORM
+ )
+ }
- intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, getLocale(this.locale))
+ // Set locale - ensure it's in the correct format
+ val localeString = getLocale(this.locale)
+ Log.d("ASR", "Setting locale to: $localeString")
+ intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, localeString)
+
+ // Add language preference hint for better recognition
+ intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_PREFERENCE, localeString)
+
+ // Enable partial results by default for better UX
+ if (!opts.hasKey("EXTRA_PARTIAL_RESULTS")) {
+ intent.putExtra(RecognizerIntent.EXTRA_PARTIAL_RESULTS, true)
+ Log.d("ASR", "startListening() - Partial results enabled by default")
+ }
+
+ Log.d("ASR", "startListening() - Calling speech?.startListening()")
speech?.startListening(intent)
+ Log.d("ASR", "startListening() - startListening() called")
}
private fun startSpeechWithPermissions(locale: String, opts: ReadableMap, callback: Callback) {
this.locale = locale
+ Log.d("ASR", "startSpeechWithPermissions() - Locale: $locale")
val mainHandler = Handler(reactContext.mainLooper)
mainHandler.post {
try {
+ Log.d("ASR", "startSpeechWithPermissions() - Starting recognition")
startListening(opts)
isRecognizing = true
+ Log.d("ASR", "startSpeechWithPermissions() - Recognition started successfully")
callback.invoke(false)
} catch (e: Exception) {
+ Log.e("ASR", "startSpeechWithPermissions() - Error: ${e.message}", e)
callback.invoke(e.message)
}
}
@@ -147,12 +229,28 @@ class Voice (context:ReactApplicationContext):RecognitionListener {
val granted = grantResults[i] == PackageManager.PERMISSION_GRANTED
permissionsGranted = permissionsGranted && granted
}
- startSpeechWithPermissions(locale!!, opts, callback!!)
+ if (permissionsGranted) {
+ startSpeechWithPermissions(locale!!, opts, callback!!)
+ } else {
+ val errorMessage = "Permission denied: RECORD_AUDIO permission is required"
+ callback?.invoke(errorMessage)
+ }
permissionsGranted
}
+ } else {
+ val errorMessage = "Current activity is null, cannot request permissions"
+ callback?.invoke(errorMessage)
}
return
}
+
+ // Check if permission is granted before starting
+ if (!isPermissionGranted()) {
+ val errorMessage = "RECORD_AUDIO permission is required but not granted"
+ callback?.invoke(errorMessage)
+ return
+ }
+
startSpeechWithPermissions(locale!!, opts, callback!!)
}
@@ -217,19 +315,42 @@ class Voice (context:ReactApplicationContext):RecognitionListener {
}
fun getSpeechRecognitionServices(promise: Promise) {
- val services = reactContext.packageManager
- .queryIntentServices(Intent(RecognitionService.SERVICE_INTERFACE), 0)
- val serviceNames = Arguments.createArray()
- for (service in services) {
- serviceNames.pushString(service.serviceInfo.packageName)
- }
+ val mainHandler = Handler(reactContext.mainLooper)
+ mainHandler.post {
+ try {
+ Log.d("ASR", "getSpeechRecognitionServices() - Starting to query services")
+ // RecognitionService.SERVICE_INTERFACE is the action string for recognition services
+ val intent = Intent(RecognitionService.SERVICE_INTERFACE)
+ // Use MATCH_DEFAULT_ONLY to get only services that can handle the intent by default
+ val services = reactContext.packageManager
+ .queryIntentServices(intent, PackageManager.MATCH_DEFAULT_ONLY)
+
+ Log.d("ASR", "getSpeechRecognitionServices() - Found ${services.size} services")
+ val serviceNames = Arguments.createArray()
+
+ if (services.isEmpty()) {
+ Log.w("ASR", "getSpeechRecognitionServices() - No recognition services found on device")
+ } else {
+ for (service in services) {
+ val packageName = service.serviceInfo.packageName
+ Log.d("ASR", "getSpeechRecognitionServices() - Service: $packageName")
+ serviceNames.pushString(packageName)
+ }
+ }
- promise.resolve(serviceNames)
+ Log.d("ASR", "getSpeechRecognitionServices() - Resolving promise with ${serviceNames.size()} services")
+ promise.resolve(serviceNames)
+ } catch (e: Exception) {
+ Log.e("ASR", "getSpeechRecognitionServices() - Error: ${e.message}", e)
+ e.printStackTrace()
+ promise.reject("GET_SERVICES_ERROR", e.message ?: "Failed to get speech recognition services", e)
+ }
+ }
}
private fun isPermissionGranted(): Boolean {
val permission = Manifest.permission.RECORD_AUDIO
- val res: Int = reactContext.checkCallingOrSelfPermission(permission)
+ val res: Int = ContextCompat.checkSelfPermission(reactContext, permission)
return res == PackageManager.PERMISSION_GRANTED
}
@@ -237,10 +358,32 @@ class Voice (context:ReactApplicationContext):RecognitionListener {
callback.invoke(isRecognizing)
}
+ /**
+ * Send an event to JavaScript via RCTDeviceEventEmitter
+ * Works with both Bridge mode and Bridgeless mode (new architecture)
+ */
private fun sendEvent(eventName: String, params: WritableMap) {
- reactContext
- .getJSModule(RCTDeviceEventEmitter::class.java)
- .emit(eventName, params)
+ // Use main thread handler - required for RCTDeviceEventEmitter
+ val mainHandler = Handler(reactContext.mainLooper)
+ mainHandler.post {
+ try {
+ val deviceEventEmitter = reactContext.getJSModule(RCTDeviceEventEmitter::class.java)
+ if (deviceEventEmitter == null) {
+ Log.e("ASR", "sendEvent($eventName) - DeviceEventEmitter is null!")
+ return@post
+ }
+
+ // Emit the event
+ deviceEventEmitter.emit(eventName, params)
+
+ // Only log non-volume events to avoid log spam
+ if (eventName != "onSpeechVolumeChanged") {
+ Log.d("ASR", "sendEvent($eventName) - Event emitted to JS")
+ }
+ } catch (e: Exception) {
+ Log.e("ASR", "sendEvent($eventName) - Error: ${e.message}", e)
+ }
+ }
}
private fun getErrorText(errorCode: Int): String {
@@ -282,7 +425,7 @@ class Voice (context:ReactApplicationContext):RecognitionListener {
override fun onBufferReceived(buffer: ByteArray?) {
val event = Arguments.createMap()
- event.putBoolean("error", false)
+ event.putBoolean("isFinal", false)
sendEvent("onSpeechRecognized", event)
Log.d("ASR", "onBufferReceived()")
}
@@ -297,10 +440,11 @@ class Voice (context:ReactApplicationContext):RecognitionListener {
override fun onError(error: Int) {
+ isRecognizing = false
val errorMessage = String.format("%d/%s", error, getErrorText(error))
val errorData = Arguments.createMap()
errorData.putString("message", errorMessage)
- errorData.putString("code", java.lang.String.valueOf(errorMessage))
+ errorData.putString("code", java.lang.String.valueOf(error))
val event = Arguments.createMap()
event.putMap("error", errorData)
sendEvent("onSpeechError", event)
@@ -308,30 +452,44 @@ class Voice (context:ReactApplicationContext):RecognitionListener {
}
override fun onResults(results: Bundle?) {
+ isRecognizing = false
+ Log.d("ASR", "onResults() called")
+
val arr = Arguments.createArray()
-
- val matches = results!!.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION)
- if (matches != null) {
- for (result in matches) {
- arr.pushString(result)
+
+ if (results != null) {
+ val matches = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION)
+
+ if (matches != null && matches.isNotEmpty()) {
+ for (result in matches) {
+ if (!result.isNullOrEmpty()) {
+ arr.pushString(result)
+ }
+ }
+ Log.d("ASR", "onResults() - ${arr.size()} results: ${matches.firstOrNull()}")
}
}
+
val event = Arguments.createMap()
event.putArray("value", arr)
sendEvent("onSpeechResults", event)
- Log.d("ASR", "onResults()")
}
override fun onPartialResults(partialResults: Bundle?) {
val arr = Arguments.createArray()
-
- val matches = partialResults?.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION)
- matches?.let {
- for (result in it) {
- arr.pushString(result)
+
+ if (partialResults != null) {
+ val matches = partialResults.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION)
+
+ if (matches != null && matches.isNotEmpty()) {
+ for (result in matches) {
+ if (!result.isNullOrEmpty()) {
+ arr.pushString(result)
+ }
+ }
}
}
-
+
val event = Arguments.createMap()
event.putArray("value", arr)
sendEvent("onSpeechPartialResults", event)
diff --git a/android/src/main/java/com/wenkesj/voice/VoiceModule.kt b/android/src/main/java/com/wenkesj/voice/VoiceModule.kt
index 65da5650..6d852ab6 100644
--- a/android/src/main/java/com/wenkesj/voice/VoiceModule.kt
+++ b/android/src/main/java/com/wenkesj/voice/VoiceModule.kt
@@ -4,53 +4,64 @@ import com.facebook.react.bridge.Callback
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.Promise
import com.facebook.react.bridge.ReadableMap
-import com.facebook.react.bridge.ReactMethod;
+import com.facebook.react.bridge.ReactMethod
+/**
+ * React Native Voice Module
+ * Provides speech recognition functionality for both old and new architectures
+ */
class VoiceModule internal constructor(context: ReactApplicationContext) :
VoiceSpec(context) {
- private val voice = Voice(context)
+
+ // Single Voice instance - created once and reused
+ private val voiceInstance: Voice by lazy { Voice(context) }
+
+ // For new architecture - provides the Voice instance to the base class
+ override fun getVoice(): Voice = voiceInstance
@ReactMethod
override fun destroySpeech(callback: Callback) {
- voice.destroySpeech(callback)
+ voiceInstance.destroySpeech(callback)
}
@ReactMethod
override fun startSpeech(locale: String, opts: ReadableMap, callback: Callback) {
- voice.startSpeech(locale,opts,callback)
+ voiceInstance.startSpeech(locale, opts, callback)
}
@ReactMethod
override fun stopSpeech(callback: Callback) {
- voice.stopSpeech(callback)
+ voiceInstance.stopSpeech(callback)
}
@ReactMethod
override fun cancelSpeech(callback: Callback) {
- voice.cancelSpeech(callback)
+ voiceInstance.cancelSpeech(callback)
}
@ReactMethod
override fun isSpeechAvailable(callback: Callback) {
- voice.isSpeechAvailable(callback)
+ voiceInstance.isSpeechAvailable(callback)
}
@ReactMethod
override fun getSpeechRecognitionServices(promise: Promise) {
- voice.getSpeechRecognitionServices(promise)
+ voiceInstance.getSpeechRecognitionServices(promise)
}
@ReactMethod
override fun isRecognizing(callback: Callback) {
- voice.isRecognizing(callback)
+ voiceInstance.isRecognizing(callback)
}
+ @ReactMethod
override fun addListener(eventType: String) {
-
+ // Required for NativeEventEmitter - no-op since we use DeviceEventEmitter
}
+ @ReactMethod
override fun removeListeners(count: Double) {
-
+ // Required for NativeEventEmitter - no-op since we use DeviceEventEmitter
}
override fun getName(): String {
diff --git a/android/src/newarch/VoiceSpec.kt b/android/src/newarch/VoiceSpec.kt
index 3f0c1ce6..73804e2c 100644
--- a/android/src/newarch/VoiceSpec.kt
+++ b/android/src/newarch/VoiceSpec.kt
@@ -5,47 +5,53 @@ import com.facebook.react.bridge.Promise
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.ReadableMap
+/**
+ * TurboModule spec for Voice (New Architecture)
+ * This is an abstract base class - VoiceModule will provide the actual Voice instance
+ */
abstract class VoiceSpec internal constructor(context: ReactApplicationContext) :
NativeVoiceAndroidSpec(context) {
- private val voice = Voice(context)
+
+ // Abstract method to get Voice instance - VoiceModule will provide this
+ protected abstract fun getVoice(): Voice
override fun destroySpeech(callback: Callback) {
- voice.destroySpeech(callback)
+ getVoice().destroySpeech(callback)
}
override fun startSpeech(locale: String, opts: ReadableMap, callback: Callback) {
- voice.startSpeech(locale,opts,callback)
+ getVoice().startSpeech(locale, opts, callback)
}
override fun stopSpeech(callback: Callback) {
- voice.stopSpeech(callback)
+ getVoice().stopSpeech(callback)
}
override fun cancelSpeech(callback: Callback) {
- voice.cancelSpeech(callback)
+ getVoice().cancelSpeech(callback)
}
override fun isSpeechAvailable(callback: Callback) {
- voice.isSpeechAvailable(callback)
+ getVoice().isSpeechAvailable(callback)
}
override fun getSpeechRecognitionServices(promise: Promise) {
- voice.getSpeechRecognitionServices(promise)
+ getVoice().getSpeechRecognitionServices(promise)
}
override fun isRecognizing(callback: Callback) {
- voice.isRecognizing(callback)
+ getVoice().isRecognizing(callback)
}
override fun addListener(eventType: String) {
-
+ // Required for NativeEventEmitter
}
override fun removeListeners(count: Double) {
-
+ // Required for NativeEventEmitter
}
- override fun getName(): String {
+ override fun getName(): String {
return NAME
}
diff --git a/android/src/oldarch/VoiceSpec.kt b/android/src/oldarch/VoiceSpec.kt
index f8c869e4..3aaa4399 100644
--- a/android/src/oldarch/VoiceSpec.kt
+++ b/android/src/oldarch/VoiceSpec.kt
@@ -6,25 +6,37 @@ import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.ReactContextBaseJavaModule
import com.facebook.react.bridge.ReadableMap
-
+/**
+ * Native Module spec for Voice (Old Architecture / Bridge)
+ * This is an abstract base class - VoiceModule will provide the actual implementation
+ */
abstract class VoiceSpec internal constructor(context: ReactApplicationContext) :
ReactContextBaseJavaModule(context) {
- abstract fun destroySpeech(callback:Callback)
+ // For compatibility with VoiceModule (shared between old/new arch)
+ protected open fun getVoice(): Voice {
+ throw NotImplementedError("getVoice() must be implemented by subclass")
+ }
+
+ abstract fun destroySpeech(callback: Callback)
- abstract fun startSpeech(locale:String, opts:ReadableMap, callback:Callback)
+ abstract fun startSpeech(locale: String, opts: ReadableMap, callback: Callback)
- abstract fun stopSpeech(callback:Callback)
+ abstract fun stopSpeech(callback: Callback)
- abstract fun cancelSpeech(callback:Callback)
+ abstract fun cancelSpeech(callback: Callback)
- abstract fun isSpeechAvailable(callback:Callback)
+ abstract fun isSpeechAvailable(callback: Callback)
abstract fun getSpeechRecognitionServices(promise: Promise)
- abstract fun isRecognizing(callback:Callback)
+ abstract fun isRecognizing(callback: Callback)
+
+ abstract fun addListener(eventType: String)
- abstract fun addListener(eventType:String)
+ abstract fun removeListeners(count: Double)
- abstract fun removeListeners(count:Double)
+ companion object {
+ const val NAME = "Voice"
+ }
}
diff --git a/example/ios/Podfile.lock b/example/ios/Podfile.lock
index 97277570..950cfdea 100644
--- a/example/ios/Podfile.lock
+++ b/example/ios/Podfile.lock
@@ -1772,7 +1772,7 @@ SPEC CHECKSUMS:
React-logger: addd140841248966c2547eb94836399cc1061f4d
React-Mapbuffer: 029b5332e78af8c67c4b5e65edfc717068b8eac1
React-microtasksnativemodule: f30949ee318ba90b9668de1e325b98838b9a4da2
- react-native-voice: 7548ce5ff00cd04bdb8e42bf6e2bd3e8b71d4f2b
+ react-native-voice: bf610989fa43b0c98e60f86925200f9aa8803050
React-nativeconfig: 470fce6d871c02dc5eff250a362d56391b7f52d6
React-NativeModulesApple: 1586448c61a7c2bd4040cc03ccde66a72037e77e
React-perflogger: c8860eaab4fe60d628b27bf0086a372c429fc74f
@@ -1805,4 +1805,4 @@ SPEC CHECKSUMS:
PODFILE CHECKSUM: 71a689932e49f453bd6454dd189b45915dda66a0
-COCOAPODS: 1.15.2
+COCOAPODS: 1.16.2
diff --git a/example/ios/example.xcodeproj/project.pbxproj b/example/ios/example.xcodeproj/project.pbxproj
index 3dd2c496..b3e03545 100644
--- a/example/ios/example.xcodeproj/project.pbxproj
+++ b/example/ios/example.xcodeproj/project.pbxproj
@@ -590,7 +590,10 @@
"-DFOLLY_CFG_NO_COROUTINES=1",
"-DFOLLY_HAVE_CLOCK_GETTIME=1",
);
- OTHER_LDFLAGS = "$(inherited) ";
+ OTHER_LDFLAGS = (
+ "$(inherited)",
+ " ",
+ );
REACT_NATIVE_PATH = "${PODS_ROOT}/../../node_modules/react-native";
SDKROOT = iphoneos;
SWIFT_ACTIVE_COMPILATION_CONDITIONS = "$(inherited) DEBUG";
@@ -659,7 +662,10 @@
"-DFOLLY_CFG_NO_COROUTINES=1",
"-DFOLLY_HAVE_CLOCK_GETTIME=1",
);
- OTHER_LDFLAGS = "$(inherited) ";
+ OTHER_LDFLAGS = (
+ "$(inherited)",
+ " ",
+ );
REACT_NATIVE_PATH = "${PODS_ROOT}/../../node_modules/react-native";
SDKROOT = iphoneos;
USE_HERMES = true;
diff --git a/example/src/VoiceTest.tsx b/example/src/VoiceTest.tsx
index 5b29e469..2032c03b 100644
--- a/example/src/VoiceTest.tsx
+++ b/example/src/VoiceTest.tsx
@@ -1,10 +1,12 @@
-import { Component } from 'react';
+import {useEffect, useState, useCallback} from 'react';
import {
StyleSheet,
Text,
View,
Image,
TouchableHighlight,
+ ScrollView,
+ Platform,
} from 'react-native';
import Voice, {
@@ -13,221 +15,355 @@ import Voice, {
type SpeechErrorEvent,
} from '@react-native-voice/voice';
-type Props = {};
-type State = {
- recognized: string;
- pitch: string;
- error: string;
- end: string;
- started: string;
- results: string[];
- partialResults: string[];
-};
-
-class VoiceTest extends Component<Props, State> {
- state = {
- recognized: '',
- pitch: '',
- error: '',
- end: '',
- started: '',
- results: [],
- partialResults: [],
- };
+// Debug logging is enabled in development builds
+const DEBUG = __DEV__;
- constructor(props: Props) {
- super(props);
- Voice.onSpeechStart = this.onSpeechStart;
- Voice.onSpeechRecognized = this.onSpeechRecognized;
- Voice.onSpeechEnd = this.onSpeechEnd;
- Voice.onSpeechError = this.onSpeechError;
- Voice.onSpeechResults = this.onSpeechResults;
- Voice.onSpeechPartialResults = this.onSpeechPartialResults;
- Voice.onSpeechVolumeChanged = this.onSpeechVolumeChanged;
- }
-
- componentWillUnmount() {
- Voice.destroy().then(Voice.removeAllListeners);
- }
-
- onSpeechStart = (e: any) => {
- console.log('onSpeechStart: ', e);
- this.setState({
- started: '√',
- });
- };
+function VoiceTest() {
+ const [recognized, setRecognized] = useState('');
+ const [pitch, setPitch] = useState('');
+ const [error, setError] = useState('');
+ const [end, setEnd] = useState('');
+ const [started, setStarted] = useState('');
+  const [results, setResults] = useState<string[]>([]);
+  const [partialResults, setPartialResults] = useState<string[]>([]);
- onSpeechRecognized = (e: SpeechRecognizedEvent) => {
- console.log('onSpeechRecognized: ', e);
- this.setState({
- recognized: '√',
- });
- };
+ const log = useCallback((message: string) => {
+ if (DEBUG) {
+ console.log('[Voice]', message);
+ }
+ }, []);
- onSpeechEnd = (e: any) => {
- console.log('onSpeechEnd: ', e);
- this.setState({
- end: '√',
- });
- };
+ const onSpeechStart = useCallback(() => {
+ log('Speech started');
+ setStarted('√');
+ }, [log]);
- onSpeechError = (e: SpeechErrorEvent) => {
- console.log('onSpeechError: ', e);
- this.setState({
- error: JSON.stringify(e.error),
- });
- };
+ const onSpeechRecognized = useCallback((_e: SpeechRecognizedEvent) => {
+ setRecognized('√');
+ }, []);
- onSpeechResults = (e: SpeechResultsEvent) => {
- console.log('onSpeechResults: ', e);
- this.setState({
- results: e.value && e.value?.length > 0 ? e.value : [],
- });
- };
+ const onSpeechEnd = useCallback(() => {
+ log('Speech ended');
+ setEnd('√');
+ }, [log]);
- onSpeechPartialResults = (e: SpeechResultsEvent) => {
- console.log('onSpeechPartialResults: ', e);
- this.setState({
- partialResults: e.value && e.value?.length > 0 ? e.value : [],
- });
- };
+ const onSpeechError = useCallback(
+ (e: SpeechErrorEvent) => {
+ log(`Error: ${JSON.stringify(e.error)}`);
+ setError(JSON.stringify(e.error));
+ },
+ [log],
+ );
- onSpeechVolumeChanged = (e: any) => {
- console.log('onSpeechVolumeChanged: ', e);
- this.setState({
- pitch: e.value,
- });
- };
+ const onSpeechResults = useCallback(
+ (e: SpeechResultsEvent) => {
+ const newResults = e.value ?? [];
+ log(`Results: ${newResults.join(', ')}`);
+ setResults(newResults);
+ },
+ [log],
+ );
+
+ const onSpeechPartialResults = useCallback((e: SpeechResultsEvent) => {
+ const newPartialResults = e.value ?? [];
+ setPartialResults(newPartialResults);
+ }, []);
+
+ const onSpeechVolumeChanged = useCallback((e: any) => {
+ setPitch(e.value);
+ }, []);
+
+ useEffect(() => {
+ Voice.onSpeechStart = onSpeechStart;
+ Voice.onSpeechRecognized = onSpeechRecognized;
+ Voice.onSpeechEnd = onSpeechEnd;
+ Voice.onSpeechError = onSpeechError;
+ Voice.onSpeechResults = onSpeechResults;
+ Voice.onSpeechPartialResults = onSpeechPartialResults;
+ Voice.onSpeechVolumeChanged = onSpeechVolumeChanged;
+
+ return () => {
+ Voice.destroy().then(Voice.removeAllListeners);
+ };
+ }, [
+ onSpeechStart,
+ onSpeechRecognized,
+ onSpeechEnd,
+ onSpeechError,
+ onSpeechResults,
+ onSpeechPartialResults,
+ onSpeechVolumeChanged,
+ ]);
- _startRecognizing = async () => {
- this.setState({
- recognized: '',
- pitch: '',
- error: '',
- started: '',
- results: [],
- partialResults: [],
- end: '',
- });
+ const _clearState = () => {
+ setRecognized('');
+ setPitch('');
+ setError('');
+ setStarted('');
+ setResults([]);
+ setPartialResults([]);
+ setEnd('');
+ };
+ const _startRecognizing = async () => {
+ _clearState();
try {
+ log('Starting speech recognition...');
await Voice.start('en-US');
+ log('Speech recognition started');
} catch (e) {
- console.error(e);
+ const errorMsg = e instanceof Error ? e.message : String(e);
+ log(`Error: ${errorMsg}`);
+ setError(errorMsg);
}
};
- _stopRecognizing = async () => {
+ const _stopRecognizing = async () => {
try {
await Voice.stop();
+ log('Speech recognition stopped');
} catch (e) {
- console.error(e);
+ if (DEBUG) console.error(e);
}
};
- _cancelRecognizing = async () => {
+ const _cancelRecognizing = async () => {
try {
await Voice.cancel();
+ log('Speech recognition cancelled');
} catch (e) {
- console.error(e);
+ if (DEBUG) console.error(e);
}
};
- _destroyRecognizer = async () => {
+ const _destroyRecognizer = async () => {
try {
await Voice.destroy();
+ log('Speech recognizer destroyed');
} catch (e) {
- console.error(e);
+ if (DEBUG) console.error(e);
}
- this.setState({
- recognized: '',
- pitch: '',
- error: '',
- started: '',
- results: [],
- partialResults: [],
- end: '',
- });
+ _clearState();
};
-  render() {
-    return (
-      <View style={styles.container}>
-        <Text style={styles.welcome}>Welcome to React Native Voice!</Text>
+  return (
+    <ScrollView contentContainerStyle={styles.scrollContainer}>
+      <View style={styles.container}>
+        <Text style={styles.welcome}>React Native Voice</Text>
         <Text style={styles.instructions}>Press the button and start speaking.</Text>
-        <Text style={styles.stat}>{`Started: ${this.state.started}`}</Text>
-        <Text style={styles.stat}>{`Recognized: ${this.state.recognized}`}</Text>
-        <Text style={styles.stat}>{`Pitch: ${this.state.pitch}`}</Text>
-        <Text style={styles.stat}>{`Error: ${this.state.error}`}</Text>
-        <Text style={styles.stat}>Results</Text>
-        {this.state.results.map((result, index) => {
-          return (
-            <Text key={`result-${index}`} style={styles.stat}>
-              {result}
-            </Text>
-          );
-        })}
-        <Text style={styles.stat}>Partial Results</Text>
-        {this.state.partialResults.map((result, index) => {
-          return (
-            <Text key={`partial-result-${index}`} style={styles.stat}>
-              {result}
-            </Text>
-          );
-        })}
-        <Text style={styles.stat}>{`End: ${this.state.end}`}</Text>
+
+        {/* Status indicators */}
+        <View style={styles.statusRow}>
+          <Text style={styles.statusLabel}>Started:</Text>
+          <Text style={[styles.statusValue, started ? styles.statusActive : null]}>
+            {started || '—'}
+          </Text>
+          <Text style={styles.statusLabel}>End:</Text>
+          <Text style={[styles.statusValue, end ? styles.statusActive : null]}>
+            {end || '—'}
+          </Text>
+        </View>
+
+        {/* Error display */}
+        {error ? <Text style={styles.errorText}>Error: {error}</Text> : null}
+
+        {/* Results */}
+        <Text style={styles.sectionTitle}>Results</Text>
+        <View style={styles.resultsContainer}>
+          {results.length > 0 ? (
+            results.map((result, index) => (
+              <Text key={`result-${index}`} style={styles.resultText}>
+                {result}
+              </Text>
+            ))
+          ) : (
+            <Text style={styles.placeholder}>Speak to see results...</Text>
+          )}
+        </View>
+
+        {/* Partial Results */}
+        <Text style={styles.sectionTitle}>Live Transcription</Text>
+        <View style={styles.partialContainer}>
+          {partialResults.length > 0 ? (
+            <Text style={styles.partialText}>{partialResults[0]}</Text>
+          ) : (
+            <Text style={styles.placeholder}>...</Text>
+          )}
+        </View>
+
+        {/* Volume indicator */}
+        {pitch ? (
+          <View style={styles.volumeContainer}>
+            <View style={[styles.volumeBar, {width: `${Math.min(100, Math.max(0, Number(pitch)) * 10)}%`}]} />
+          </View>
+        ) : null}
+
+        {/* Controls */}
-        <TouchableHighlight onPress={this._startRecognizing}>
-          <Image style={styles.button} source={require('./button.png')} />
-        </TouchableHighlight>
-        <TouchableHighlight onPress={this._stopRecognizing}>
-          <Text style={styles.action}>Stop Recognizing</Text>
-        </TouchableHighlight>
-        <TouchableHighlight onPress={this._cancelRecognizing}>
-          <Text style={styles.action}>Cancel</Text>
-        </TouchableHighlight>
-        <TouchableHighlight onPress={this._destroyRecognizer}>
-          <Text style={styles.action}>Destroy</Text>
-        </TouchableHighlight>
+        <TouchableHighlight
+          onPress={_startRecognizing}
+          style={styles.buttonContainer}>
+          <Image style={styles.button} source={require('./button.png')} />
+        </TouchableHighlight>
+        <View style={styles.actionsRow}>
+          <TouchableHighlight onPress={_stopRecognizing}>
+            <Text style={styles.action}>Stop</Text>
+          </TouchableHighlight>
+          <TouchableHighlight onPress={_cancelRecognizing}>
+            <Text style={styles.action}>Cancel</Text>
+          </TouchableHighlight>
+          <TouchableHighlight onPress={_destroyRecognizer}>
+            <Text style={styles.action}>Reset</Text>
+          </TouchableHighlight>
+        </View>
+
+        {Platform.OS === 'ios' && (
+          <Text style={styles.hint}>
+            Tip: On iOS, press Stop when done speaking
+          </Text>
+        )}
-      </View>
-    );
-  }
+      </View>
+    </ScrollView>
+  );
 }
const styles = StyleSheet.create({
- button: {
- width: 50,
- height: 50,
+ scrollContainer: {
+ flexGrow: 1,
},
container: {
flex: 1,
justifyContent: 'center',
alignItems: 'center',
backgroundColor: '#F5FCFF',
+ paddingVertical: 30,
+ paddingHorizontal: 20,
},
welcome: {
- fontSize: 20,
+ fontSize: 24,
+ fontWeight: 'bold',
textAlign: 'center',
- margin: 10,
+ marginBottom: 10,
+ color: '#333',
},
- action: {
+ instructions: {
textAlign: 'center',
- color: '#0000FF',
- marginVertical: 5,
+ color: '#666',
+ marginBottom: 20,
+ },
+ statusRow: {
+ flexDirection: 'row',
+ alignItems: 'center',
+ marginBottom: 15,
+ },
+ statusLabel: {
+ fontSize: 14,
+ color: '#888',
+ marginRight: 5,
+ },
+ statusValue: {
+ fontSize: 14,
+ color: '#ccc',
+ marginRight: 15,
+ },
+ statusActive: {
+ color: '#4CAF50',
fontWeight: 'bold',
},
- instructions: {
+ sectionTitle: {
+ fontSize: 16,
+ fontWeight: '600',
+ color: '#333',
+ marginTop: 15,
+ marginBottom: 8,
+ },
+ resultsContainer: {
+ backgroundColor: '#fff',
+ borderRadius: 10,
+ padding: 15,
+ width: '100%',
+ minHeight: 60,
+ shadowColor: '#000',
+ shadowOffset: {width: 0, height: 1},
+ shadowOpacity: 0.1,
+ shadowRadius: 3,
+ elevation: 2,
+ },
+ resultText: {
+ fontSize: 18,
+ color: '#2196F3',
+ textAlign: 'center',
+ },
+ partialContainer: {
+ backgroundColor: '#E3F2FD',
+ borderRadius: 10,
+ padding: 15,
+ width: '100%',
+ minHeight: 50,
+ },
+ partialText: {
+ fontSize: 16,
+ color: '#1976D2',
textAlign: 'center',
- color: '#333333',
- marginBottom: 5,
+ fontStyle: 'italic',
},
- stat: {
+ placeholder: {
+ fontSize: 14,
+ color: '#aaa',
textAlign: 'center',
- color: '#B0171F',
- marginBottom: 1,
+ },
+ volumeContainer: {
+ width: '80%',
+ height: 6,
+ backgroundColor: '#E0E0E0',
+ borderRadius: 3,
+ marginTop: 15,
+ overflow: 'hidden',
+ },
+ volumeBar: {
+ height: '100%',
+ backgroundColor: '#4CAF50',
+ borderRadius: 3,
+ },
+ buttonContainer: {
+ marginTop: 25,
+ marginBottom: 15,
+ borderRadius: 30,
+ },
+ button: {
+ width: 60,
+ height: 60,
+ },
+ actionsRow: {
+ flexDirection: 'row',
+ justifyContent: 'center',
+ gap: 20,
+ },
+ action: {
+ fontSize: 16,
+ color: '#2196F3',
+ paddingVertical: 10,
+ paddingHorizontal: 15,
+ },
+ errorText: {
+ color: '#F44336',
+ fontSize: 14,
+ textAlign: 'center',
+ marginBottom: 10,
+ },
+ hint: {
+ marginTop: 20,
+ fontSize: 12,
+ color: '#888',
+ fontStyle: 'italic',
},
});
diff --git a/example/src/VoiceTestFuncComp.tsx b/example/src/VoiceTestFuncComp.tsx
index 75b522fb..4ba132d6 100644
--- a/example/src/VoiceTestFuncComp.tsx
+++ b/example/src/VoiceTestFuncComp.tsx
@@ -1,11 +1,5 @@
-import { useEffect, useState } from 'react';
-import {
- StyleSheet,
- Text,
- View,
- Image,
- TouchableHighlight,
-} from 'react-native';
+import {useEffect, useState} from 'react';
+import {StyleSheet, Text, View, Image, TouchableHighlight} from 'react-native';
import Voice, {
type SpeechRecognizedEvent,
@@ -21,6 +15,7 @@ function VoiceTest() {
const [started, setStarted] = useState('');
const [results, setResults] = useState([]);
const [partialResults, setPartialResults] = useState([]);
+  const [services, setServices] = useState<string[]>([]);
useEffect(() => {
Voice.onSpeechStart = onSpeechStart;
@@ -106,6 +101,18 @@ function VoiceTest() {
_clearState();
};
+ const _getServices = async () => {
+ try {
+ console.log('Getting speech recognition services...');
+ const servicesList = await Voice.getSpeechRecognitionServices();
+ console.log('Services found:', servicesList);
+ setServices(servicesList);
+ } catch (e) {
+ console.error('Error getting services:', e);
+ setError(`Services Error: ${e}`);
+ }
+ };
+
const _clearState = () => {
setRecognized('');
setVolume('');
@@ -143,6 +150,16 @@ function VoiceTest() {
);
})}
{`End: ${end}`}
+      <Text style={styles.stat}>Recognition Services</Text>
+      {services.length > 0 ? (
+        services.map((service, index) => (
+          <Text key={`service-${index}`} style={styles.stat}>
+            {service}
+          </Text>
+        ))
+      ) : (
+        <Text style={styles.stat}>No services found</Text>
+      )}
@@ -155,6 +172,9 @@ function VoiceTest() {
Destroy
+      <TouchableHighlight onPress={_getServices}>
+        <Text style={styles.action}>Get Services</Text>
+      </TouchableHighlight>
);
}
diff --git a/example/yarn.lock b/example/yarn.lock
index e1ab6df6..fdbf3ab4 100644
--- a/example/yarn.lock
+++ b/example/yarn.lock
@@ -19,13 +19,6 @@
js-tokens "^4.0.0"
picocolors "^1.0.0"
-"@babel/code-frame@~7.10.4":
- version "7.10.4"
- resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.10.4.tgz#168da1a36e90da68ae8d49c0f1b48c7c6249213a"
- integrity sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==
- dependencies:
- "@babel/highlight" "^7.10.4"
-
"@babel/compat-data@^7.22.6", "@babel/compat-data@^7.26.5", "@babel/compat-data@^7.26.8":
version "7.26.8"
resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.26.8.tgz#821c1d35641c355284d4a870b8a4a7b0c141e367"
@@ -218,16 +211,6 @@
"@babel/template" "^7.26.9"
"@babel/types" "^7.26.9"
-"@babel/highlight@^7.10.4":
- version "7.25.9"
- resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.25.9.tgz#8141ce68fc73757946f983b343f1231f4691acc6"
- integrity sha512-llL88JShoCsth8fF8R4SJnIn+WLvR6ccFxu1H3FlMhDontdcmZWf2HgIZ7AIqV3Xcck1idlohrN4EUBQz6klbw==
- dependencies:
- "@babel/helper-validator-identifier" "^7.25.9"
- chalk "^2.4.2"
- js-tokens "^4.0.0"
- picocolors "^1.0.0"
-
"@babel/parser@^7.1.0", "@babel/parser@^7.13.16", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.23.9", "@babel/parser@^7.25.3", "@babel/parser@^7.26.9":
version "7.26.9"
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.26.9.tgz#d9e78bee6dc80f9efd8f2349dcfbbcdace280fd5"
@@ -1118,54 +1101,6 @@
resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.57.1.tgz#de633db3ec2ef6a3c89e2f19038063e8a122e2c2"
integrity sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==
-"@expo/config-plugins@^9.0.9":
- version "9.0.15"
- resolved "https://registry.yarnpkg.com/@expo/config-plugins/-/config-plugins-9.0.15.tgz#3ec80e9a6ea915205816be7f61f0ab9f3f14f6c4"
- integrity sha512-elKY/zIpAJ40RH26iwfyp+hwgeyPgIXX0SrCSOcjeJLsMsCmMac9ewvb+AN8y4k+N7m5lD/dMZupsaateKTFwA==
- dependencies:
- "@expo/config-types" "^52.0.4"
- "@expo/json-file" "~9.0.1"
- "@expo/plist" "^0.2.1"
- "@expo/sdk-runtime-versions" "^1.0.0"
- chalk "^4.1.2"
- debug "^4.3.5"
- getenv "^1.0.0"
- glob "^10.4.2"
- resolve-from "^5.0.0"
- semver "^7.5.4"
- slash "^3.0.0"
- slugify "^1.6.6"
- xcode "^3.0.1"
- xml2js "0.6.0"
-
-"@expo/config-types@^52.0.4":
- version "52.0.4"
- resolved "https://registry.yarnpkg.com/@expo/config-types/-/config-types-52.0.4.tgz#6ff4fbbfd0d09894d8c50d15a89816cc2d9d2531"
- integrity sha512-oMGrb2o3niVCIfjnIHFrOoiDA9jGb0lc3G4RI1UiO//KjULBaQr3QTBoKDzZQwMqDV1AgYgSr9mgEcnX3LqhIg==
-
-"@expo/json-file@~9.0.1":
- version "9.0.2"
- resolved "https://registry.yarnpkg.com/@expo/json-file/-/json-file-9.0.2.tgz#ec508c2ad17490e0c664c9d7e2ae0ce65915d3ed"
- integrity sha512-yAznIUrybOIWp3Uax7yRflB0xsEpvIwIEqIjao9SGi2Gaa+N0OamWfe0fnXBSWF+2zzF4VvqwT4W5zwelchfgw==
- dependencies:
- "@babel/code-frame" "~7.10.4"
- json5 "^2.2.3"
- write-file-atomic "^2.3.0"
-
-"@expo/plist@^0.2.1":
- version "0.2.2"
- resolved "https://registry.yarnpkg.com/@expo/plist/-/plist-0.2.2.tgz#2563b71b4aa78dc9dbc34cc3d2e1011e994bc9cd"
- integrity sha512-ZZGvTO6vEWq02UAPs3LIdja+HRO18+LRI5QuDl6Hs3Ps7KX7xU6Y6kjahWKY37Rx2YjNpX07dGpBFzzC+vKa2g==
- dependencies:
- "@xmldom/xmldom" "~0.7.7"
- base64-js "^1.2.3"
- xmlbuilder "^14.0.0"
-
-"@expo/sdk-runtime-versions@^1.0.0":
- version "1.0.0"
- resolved "https://registry.yarnpkg.com/@expo/sdk-runtime-versions/-/sdk-runtime-versions-1.0.0.tgz#d7ebd21b19f1c6b0395e50d78da4416941c57f7c"
- integrity sha512-Doz2bfiPndXYFPMRwPyGa1k5QaKDVpY806UJj570epIiMzWaYyCtobasyfC++qfIXVb5Ocy7r3tP9d62hAQ7IQ==
-
"@hapi/hoek@^9.0.0", "@hapi/hoek@^9.3.0":
version "9.3.0"
resolved "https://registry.yarnpkg.com/@hapi/hoek/-/hoek-9.3.0.tgz#8368869dcb735be2e7f5cb7647de78e167a251fb"
@@ -1197,18 +1132,6 @@
resolved "https://registry.yarnpkg.com/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz#4a2868d75d6d6963e423bcf90b7fd1be343409d3"
integrity sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==
-"@isaacs/cliui@^8.0.2":
- version "8.0.2"
- resolved "https://registry.yarnpkg.com/@isaacs/cliui/-/cliui-8.0.2.tgz#b37667b7bc181c168782259bab42474fbf52b550"
- integrity sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==
- dependencies:
- string-width "^5.1.2"
- string-width-cjs "npm:string-width@^4.2.0"
- strip-ansi "^7.0.1"
- strip-ansi-cjs "npm:strip-ansi@^6.0.1"
- wrap-ansi "^8.1.0"
- wrap-ansi-cjs "npm:wrap-ansi@^7.0.0"
-
"@isaacs/ttlcache@^1.4.1":
version "1.4.1"
resolved "https://registry.yarnpkg.com/@isaacs/ttlcache/-/ttlcache-1.4.1.tgz#21fb23db34e9b6220c6ba023a0118a2dd3461ea2"
@@ -1508,11 +1431,6 @@
"@nodelib/fs.scandir" "2.1.5"
fastq "^1.6.0"
-"@pkgjs/parseargs@^0.11.0":
- version "0.11.0"
- resolved "https://registry.yarnpkg.com/@pkgjs/parseargs/-/parseargs-0.11.0.tgz#a77ea742fab25775145434eb1d2328cf5013ac33"
- integrity sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==
-
"@react-native-community/cli-clean@15.0.1":
version "15.0.1"
resolved "https://registry.yarnpkg.com/@react-native-community/cli-clean/-/cli-clean-15.0.1.tgz#80ce09ffe0d62bb265447007f24dc8dcbf8fe7d3"
@@ -1668,7 +1586,6 @@
"@react-native-voice/voice@file:..":
version "3.2.4"
dependencies:
- "@expo/config-plugins" "^9.0.9"
invariant "^2.2.4"
"@react-native/assets-registry@0.76.2":
@@ -2156,16 +2073,6 @@
resolved "https://registry.yarnpkg.com/@ungap/structured-clone/-/structured-clone-1.3.0.tgz#d06bbb384ebcf6c505fde1c3d0ed4ddffe0aaff8"
integrity sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==
-"@xmldom/xmldom@^0.8.8":
- version "0.8.10"
- resolved "https://registry.yarnpkg.com/@xmldom/xmldom/-/xmldom-0.8.10.tgz#a1337ca426aa61cef9fe15b5b28e340a72f6fa99"
- integrity sha512-2WALfTl4xo2SkGCYRt6rDTFfk9R1czmBvUQy12gK2KuRKIpWEhcbbzy8EZXtz/jkRqHX8bFEc6FC1HjX4TUWYw==
-
-"@xmldom/xmldom@~0.7.7":
- version "0.7.13"
- resolved "https://registry.yarnpkg.com/@xmldom/xmldom/-/xmldom-0.7.13.tgz#ff34942667a4e19a9f4a0996a76814daac364cf3"
- integrity sha512-lm2GW5PkosIzccsaZIz7tp8cPADSIlIHWDFTR1N0SzfinhhYgeIQjFMz4rYzanCScr3DqQLeomUDArp6MWKm+g==
-
abort-controller@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/abort-controller/-/abort-controller-3.0.0.tgz#eaf54d53b62bae4138e809ca225c8439a6efb392"
@@ -2232,12 +2139,7 @@ ansi-regex@^5.0.0, ansi-regex@^5.0.1:
resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304"
integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==
-ansi-regex@^6.0.1:
- version "6.1.0"
- resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.1.0.tgz#95ec409c69619d6cb1b8b34f14b660ef28ebd654"
- integrity sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==
-
-ansi-styles@^3.2.0, ansi-styles@^3.2.1:
+ansi-styles@^3.2.0:
version "3.2.1"
resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d"
integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==
@@ -2256,11 +2158,6 @@ ansi-styles@^5.0.0:
resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-5.2.0.tgz#07449690ad45777d1924ac2abb2fc8895dba836b"
integrity sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==
-ansi-styles@^6.1.0:
- version "6.2.1"
- resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-6.2.1.tgz#0e62320cf99c21afff3b3012192546aacbfb05c5"
- integrity sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==
-
anymatch@^3.0.3:
version "3.1.3"
resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.3.tgz#790c58b19ba1720a84205b57c618d5ad8524973e"
@@ -2527,16 +2424,11 @@ balanced-match@^1.0.0:
resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee"
integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==
-base64-js@^1.2.3, base64-js@^1.3.1, base64-js@^1.5.1:
+base64-js@^1.3.1, base64-js@^1.5.1:
version "1.5.1"
resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a"
integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==
-big-integer@1.6.x:
- version "1.6.52"
- resolved "https://registry.yarnpkg.com/big-integer/-/big-integer-1.6.52.tgz#60a887f3047614a8e1bffe5d7173490a97dc8c85"
- integrity sha512-QxD8cf2eVqJOOz63z6JIN9BzvVs/dlySa5HGSBH5xtR8dPteIRQnBxxKqkNTiT6jbDTF6jAfrd4oMcND9RGbQg==
-
bl@^4.1.0:
version "4.1.0"
resolved "https://registry.yarnpkg.com/bl/-/bl-4.1.0.tgz#451535264182bec2fbbc83a62ab98cf11d9f7b3a"
@@ -2546,20 +2438,6 @@ bl@^4.1.0:
inherits "^2.0.4"
readable-stream "^3.4.0"
-bplist-creator@0.1.0:
- version "0.1.0"
- resolved "https://registry.yarnpkg.com/bplist-creator/-/bplist-creator-0.1.0.tgz#018a2d1b587f769e379ef5519103730f8963ba1e"
- integrity sha512-sXaHZicyEEmY86WyueLTQesbeoH/mquvarJaQNbjuOQO+7gbFcDEWqKmcWA4cOTLzFlfgvkiVxolk1k5bBIpmg==
- dependencies:
- stream-buffers "2.2.x"
-
-bplist-parser@0.3.1:
- version "0.3.1"
- resolved "https://registry.yarnpkg.com/bplist-parser/-/bplist-parser-0.3.1.tgz#e1c90b2ca2a9f9474cc72f6862bbf3fee8341fd1"
- integrity sha512-PyJxiNtA5T2PlLIeBot4lbp7rj4OadzjnMZD/G5zuBNt8ei/yCU7+wW0h2bag9vr8c+/WuRWmSxbqAl9hL1rBA==
- dependencies:
- big-integer "1.6.x"
-
brace-expansion@^1.1.7:
version "1.1.11"
resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd"
@@ -2682,15 +2560,6 @@ caniuse-lite@^1.0.30001688:
resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001700.tgz#26cd429cf09b4fd4e745daf4916039c794d720f6"
integrity sha512-2S6XIXwaE7K7erT8dY+kLQcpa5ms63XlRkMkReXjle+kf6c5g38vyMl+Z5y8dSxOFDhcFe+nxnn261PLxBSQsQ==
-chalk@^2.4.2:
- version "2.4.2"
- resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424"
- integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==
- dependencies:
- ansi-styles "^3.2.1"
- escape-string-regexp "^1.0.5"
- supports-color "^5.3.0"
-
chalk@^4.0.0, chalk@^4.1.0, chalk@^4.1.2:
version "4.1.2"
resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01"
@@ -2929,7 +2798,7 @@ create-jest@^29.7.0:
jest-util "^29.7.0"
prompts "^2.0.1"
-cross-spawn@^7.0.0, cross-spawn@^7.0.2, cross-spawn@^7.0.3:
+cross-spawn@^7.0.2, cross-spawn@^7.0.3:
version "7.0.6"
resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f"
integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==
@@ -2982,7 +2851,7 @@ debug@2.6.9, debug@^2.2.0, debug@^2.6.9:
dependencies:
ms "2.0.0"
-debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4, debug@^4.3.5:
+debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4:
version "4.4.0"
resolved "https://registry.yarnpkg.com/debug/-/debug-4.4.0.tgz#2b3f2aea2ffeb776477460267377dc8710faba8a"
integrity sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==
@@ -3084,11 +2953,6 @@ dunder-proto@^1.0.0, dunder-proto@^1.0.1:
es-errors "^1.3.0"
gopd "^1.2.0"
-eastasianwidth@^0.2.0:
- version "0.2.0"
- resolved "https://registry.yarnpkg.com/eastasianwidth/-/eastasianwidth-0.2.0.tgz#696ce2ec0aa0e6ea93a397ffcf24aa7840c827cb"
- integrity sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==
-
ee-first@1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
@@ -3109,11 +2973,6 @@ emoji-regex@^8.0.0:
resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37"
integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==
-emoji-regex@^9.2.2:
- version "9.2.2"
- resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-9.2.2.tgz#840c8803b0d8047f4ff0cf963176b32d4ef3ed72"
- integrity sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==
-
encodeurl@~1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59"
@@ -3668,14 +3527,6 @@ for-each@^0.3.3:
dependencies:
is-callable "^1.2.7"
-foreground-child@^3.1.0:
- version "3.3.0"
- resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-3.3.0.tgz#0ac8644c06e431439f8561db8ecf29a7b5519c77"
- integrity sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==
- dependencies:
- cross-spawn "^7.0.0"
- signal-exit "^4.0.1"
-
fresh@0.5.2:
version "0.5.2"
resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7"
@@ -3775,11 +3626,6 @@ get-symbol-description@^1.1.0:
es-errors "^1.3.0"
get-intrinsic "^1.2.6"
-getenv@^1.0.0:
- version "1.0.0"
- resolved "https://registry.yarnpkg.com/getenv/-/getenv-1.0.0.tgz#874f2e7544fbca53c7a4738f37de8605c3fcfc31"
- integrity sha512-7yetJWqbS9sbn0vIfliPsFgoXMKn/YMF+Wuiog97x+urnSRRRZ7xB+uVkwGKzRgq9CDFfMQnE9ruL5DHv9c6Xg==
-
glob-parent@^5.1.2:
version "5.1.2"
resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4"
@@ -3794,18 +3640,6 @@ glob-parent@^6.0.2:
dependencies:
is-glob "^4.0.3"
-glob@^10.4.2:
- version "10.4.5"
- resolved "https://registry.yarnpkg.com/glob/-/glob-10.4.5.tgz#f4d9f0b90ffdbab09c9d77f5f29b4262517b0956"
- integrity sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==
- dependencies:
- foreground-child "^3.1.0"
- jackspeak "^3.1.2"
- minimatch "^9.0.4"
- minipass "^7.1.2"
- package-json-from-dist "^1.0.0"
- path-scurry "^1.11.1"
-
glob@^7.1.1, glob@^7.1.3, glob@^7.1.4:
version "7.2.3"
resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.3.tgz#b8df0fb802bbfa8e89bd1d938b4e16578ed44f2b"
@@ -3870,11 +3704,6 @@ has-bigints@^1.0.2:
resolved "https://registry.yarnpkg.com/has-bigints/-/has-bigints-1.1.0.tgz#28607e965ac967e03cd2a2c70a2636a1edad49fe"
integrity sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg==
-has-flag@^3.0.0:
- version "3.0.0"
- resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd"
- integrity sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==
-
has-flag@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b"
@@ -4368,15 +4197,6 @@ iterator.prototype@^1.1.4:
has-symbols "^1.1.0"
set-function-name "^2.0.2"
-jackspeak@^3.1.2:
- version "3.4.3"
- resolved "https://registry.yarnpkg.com/jackspeak/-/jackspeak-3.4.3.tgz#8833a9d89ab4acde6188942bd1c53b6390ed5a8a"
- integrity sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==
- dependencies:
- "@isaacs/cliui" "^8.0.2"
- optionalDependencies:
- "@pkgjs/parseargs" "^0.11.0"
-
jest-changed-files@^29.7.0:
version "29.7.0"
resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-29.7.0.tgz#1c06d07e77c78e1585d020424dedc10d6e17ac3a"
@@ -4967,11 +4787,6 @@ loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.4.0:
dependencies:
js-tokens "^3.0.0 || ^4.0.0"
-lru-cache@^10.2.0:
- version "10.4.3"
- resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.4.3.tgz#410fc8a17b70e598013df257c2446b7f3383f119"
- integrity sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==
-
lru-cache@^5.1.1:
version "5.1.1"
resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920"
@@ -5277,11 +5092,6 @@ minimist@^1.2.6:
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c"
integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==
-"minipass@^5.0.0 || ^6.0.2 || ^7.0.0", minipass@^7.1.2:
- version "7.1.2"
- resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.2.tgz#93a9626ce5e5e66bd4db86849e7515e92340a707"
- integrity sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==
-
mkdirp@^0.5.1:
version "0.5.6"
resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.6.tgz#7def03d2432dcae4ba1d611445c48396062255f6"
@@ -5567,11 +5377,6 @@ p-try@^2.0.0:
resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6"
integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==
-package-json-from-dist@^1.0.0:
- version "1.0.1"
- resolved "https://registry.yarnpkg.com/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz#4f1471a010827a86f94cfd9b0727e36d267de505"
- integrity sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==
-
parent-module@^1.0.0:
version "1.0.1"
resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2"
@@ -5627,14 +5432,6 @@ path-parse@^1.0.7:
resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735"
integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==
-path-scurry@^1.11.1:
- version "1.11.1"
- resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-1.11.1.tgz#7960a668888594a0720b12a911d1a742ab9f11d2"
- integrity sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==
- dependencies:
- lru-cache "^10.2.0"
- minipass "^5.0.0 || ^6.0.2 || ^7.0.0"
-
path-type@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b"
@@ -5674,15 +5471,6 @@ pkg-dir@^4.2.0:
dependencies:
find-up "^4.0.0"
-plist@^3.0.5:
- version "3.1.0"
- resolved "https://registry.yarnpkg.com/plist/-/plist-3.1.0.tgz#797a516a93e62f5bde55e0b9cc9c967f860893c9"
- integrity sha512-uysumyrvkUX0rX/dEVqt8gC3sTBzd4zoWfLeS29nb53imdaXVvLINYXTI2GNqzaMuvacNx4uJQ8+b3zXR0pkgQ==
- dependencies:
- "@xmldom/xmldom" "^0.8.8"
- base64-js "^1.5.1"
- xmlbuilder "^15.1.1"
-
possible-typed-array-names@^1.0.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz#93e3582bc0e5426586d9d07b79ee40fc841de4ae"
@@ -6089,11 +5877,6 @@ safe-regex-test@^1.1.0:
es-errors "^1.3.0"
is-regex "^1.2.1"
-sax@>=0.6.0:
- version "1.4.1"
- resolved "https://registry.yarnpkg.com/sax/-/sax-1.4.1.tgz#44cc8988377f126304d3b3fc1010c733b929ef0f"
- integrity sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==
-
scheduler@0.24.0-canary-efb381bbf-20230505:
version "0.24.0-canary-efb381bbf-20230505"
resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.24.0-canary-efb381bbf-20230505.tgz#5dddc60e29f91cd7f8b983d7ce4a99c2202d178f"
@@ -6275,20 +6058,6 @@ signal-exit@^3.0.2, signal-exit@^3.0.3, signal-exit@^3.0.7:
resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9"
integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==
-signal-exit@^4.0.1:
- version "4.1.0"
- resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-4.1.0.tgz#952188c1cbd546070e2dd20d0f41c0ae0530cb04"
- integrity sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==
-
-simple-plist@^1.1.0:
- version "1.3.1"
- resolved "https://registry.yarnpkg.com/simple-plist/-/simple-plist-1.3.1.tgz#16e1d8f62c6c9b691b8383127663d834112fb017"
- integrity sha512-iMSw5i0XseMnrhtIzRb7XpQEXepa9xhWxGUojHBL43SIpQuDQkh3Wpy67ZbDzZVr6EKxvwVChnVpdl8hEVLDiw==
- dependencies:
- bplist-creator "0.1.0"
- bplist-parser "0.3.1"
- plist "^3.0.5"
-
sisteransi@^1.0.5:
version "1.0.5"
resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.5.tgz#134d681297756437cc05ca01370d3a7a571075ed"
@@ -6308,11 +6077,6 @@ slice-ansi@^2.0.0:
astral-regex "^1.0.0"
is-fullwidth-code-point "^2.0.0"
-slugify@^1.6.6:
- version "1.6.6"
- resolved "https://registry.yarnpkg.com/slugify/-/slugify-1.6.6.tgz#2d4ac0eacb47add6af9e04d3be79319cbcc7924b"
- integrity sha512-h+z7HKHYXj6wJU+AnS/+IH8Uh9fdcX1Lrhg1/VMdf9PwoBQXFcXiAdsy2tSK0P6gKwJLXp02r90ahUCqHk9rrw==
-
source-map-support@0.5.13:
version "0.5.13"
resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.13.tgz#31b24a9c2e73c2de85066c0feb7d44767ed52932"
@@ -6373,11 +6137,6 @@ statuses@~1.5.0:
resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c"
integrity sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==
-stream-buffers@2.2.x:
- version "2.2.0"
- resolved "https://registry.yarnpkg.com/stream-buffers/-/stream-buffers-2.2.0.tgz#91d5f5130d1cef96dcfa7f726945188741d09ee4"
- integrity sha512-uyQK/mx5QjHun80FLJTfaWE7JtwfRMKBLkMne6udYOmvH0CawotVa7TfgYHzAnpphn4+TweIx1QKMnRIbipmUg==
-
string-length@^4.0.1:
version "4.0.2"
resolved "https://registry.yarnpkg.com/string-length/-/string-length-4.0.2.tgz#a8a8dc7bd5c1a82b9b3c8b87e125f66871b6e57a"
@@ -6391,15 +6150,6 @@ string-natural-compare@^3.0.1:
resolved "https://registry.yarnpkg.com/string-natural-compare/-/string-natural-compare-3.0.1.tgz#7a42d58474454963759e8e8b7ae63d71c1e7fdf4"
integrity sha512-n3sPwynL1nwKi3WJ6AIsClwBMa0zTi54fn2oLU6ndfTSIO05xaznjSf15PcBZU6FNWbmN5Q6cxT4V5hGvB4taw==
-"string-width-cjs@npm:string-width@^4.2.0":
- version "4.2.3"
- resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010"
- integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
- dependencies:
- emoji-regex "^8.0.0"
- is-fullwidth-code-point "^3.0.0"
- strip-ansi "^6.0.1"
-
string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3:
version "4.2.3"
resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010"
@@ -6409,15 +6159,6 @@ string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3:
is-fullwidth-code-point "^3.0.0"
strip-ansi "^6.0.1"
-string-width@^5.0.1, string-width@^5.1.2:
- version "5.1.2"
- resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794"
- integrity sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==
- dependencies:
- eastasianwidth "^0.2.0"
- emoji-regex "^9.2.2"
- strip-ansi "^7.0.1"
-
string.prototype.matchall@^4.0.12:
version "4.0.12"
resolved "https://registry.yarnpkg.com/string.prototype.matchall/-/string.prototype.matchall-4.0.12.tgz#6c88740e49ad4956b1332a911e949583a275d4c0"
@@ -6484,13 +6225,6 @@ string_decoder@^1.1.1:
dependencies:
safe-buffer "~5.2.0"
-"strip-ansi-cjs@npm:strip-ansi@^6.0.1":
- version "6.0.1"
- resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9"
- integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==
- dependencies:
- ansi-regex "^5.0.1"
-
strip-ansi@^5.0.0, strip-ansi@^5.2.0:
version "5.2.0"
resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-5.2.0.tgz#8c9a536feb6afc962bdfa5b104a5091c1ad9c0ae"
@@ -6505,13 +6239,6 @@ strip-ansi@^6.0.0, strip-ansi@^6.0.1:
dependencies:
ansi-regex "^5.0.1"
-strip-ansi@^7.0.1:
- version "7.1.0"
- resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.1.0.tgz#d5b6568ca689d8561370b0707685d22434faff45"
- integrity sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==
- dependencies:
- ansi-regex "^6.0.1"
-
strip-bom@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-4.0.0.tgz#9c3505c1db45bcedca3d9cf7a16f5c5aa3901878"
@@ -6537,13 +6264,6 @@ sudo-prompt@^9.0.0:
resolved "https://registry.yarnpkg.com/sudo-prompt/-/sudo-prompt-9.2.1.tgz#77efb84309c9ca489527a4e749f287e6bdd52afd"
integrity sha512-Mu7R0g4ig9TUuGSxJavny5Rv0egCEtpZRNMrZaYS1vxkiIxGiGUwoezU3LazIQ+KE04hTrTfNPgxU5gzi7F5Pw==
-supports-color@^5.3.0:
- version "5.5.0"
- resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f"
- integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==
- dependencies:
- has-flag "^3.0.0"
-
supports-color@^7.1.0:
version "7.2.0"
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da"
@@ -6793,11 +6513,6 @@ utils-merge@1.0.1:
resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713"
integrity sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==
-uuid@^7.0.3:
- version "7.0.3"
- resolved "https://registry.yarnpkg.com/uuid/-/uuid-7.0.3.tgz#c5c9f2c8cf25dc0a372c4df1441c41f5bd0c680b"
- integrity sha512-DPSke0pXhTZgoF/d+WSt2QaKMCFSfx7QegxEWT+JOuHF5aWrKEn0G+ztjuJg/gG8/ItK+rbPCD/yNv8yyih6Cg==
-
v8-to-istanbul@^9.0.1:
version "9.3.0"
resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz#b9572abfa62bd556c16d75fdebc1a411d5ff3175"
@@ -6918,15 +6633,6 @@ word-wrap@^1.2.5:
resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.5.tgz#d2c45c6dd4fbce621a66f136cbe328afd0410b34"
integrity sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==
-"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0":
- version "7.0.0"
- resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43"
- integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==
- dependencies:
- ansi-styles "^4.0.0"
- string-width "^4.1.0"
- strip-ansi "^6.0.0"
-
wrap-ansi@^6.2.0:
version "6.2.0"
resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53"
@@ -6945,15 +6651,6 @@ wrap-ansi@^7.0.0:
string-width "^4.1.0"
strip-ansi "^6.0.0"
-wrap-ansi@^8.1.0:
- version "8.1.0"
- resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-8.1.0.tgz#56dc22368ee570face1b49819975d9b9a5ead214"
- integrity sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==
- dependencies:
- ansi-styles "^6.1.0"
- string-width "^5.0.1"
- strip-ansi "^7.0.1"
-
wrappy@1:
version "1.0.2"
resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
@@ -6988,37 +6685,6 @@ ws@^7, ws@^7.5.10:
resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.10.tgz#58b5c20dc281633f6c19113f39b349bd8bd558d9"
integrity sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==
-xcode@^3.0.1:
- version "3.0.1"
- resolved "https://registry.yarnpkg.com/xcode/-/xcode-3.0.1.tgz#3efb62aac641ab2c702458f9a0302696146aa53c"
- integrity sha512-kCz5k7J7XbJtjABOvkc5lJmkiDh8VhjVCGNiqdKCscmVpdVUpEAyXv1xmCLkQJ5dsHqx3IPO4XW+NTDhU/fatA==
- dependencies:
- simple-plist "^1.1.0"
- uuid "^7.0.3"
-
-xml2js@0.6.0:
- version "0.6.0"
- resolved "https://registry.yarnpkg.com/xml2js/-/xml2js-0.6.0.tgz#07afc447a97d2bd6507a1f76eeadddb09f7a8282"
- integrity sha512-eLTh0kA8uHceqesPqSE+VvO1CDDJWMwlQfB6LuN6T8w6MaDJ8Txm8P7s5cHD0miF0V+GGTZrDQfxPZQVsur33w==
- dependencies:
- sax ">=0.6.0"
- xmlbuilder "~11.0.0"
-
-xmlbuilder@^14.0.0:
- version "14.0.0"
- resolved "https://registry.yarnpkg.com/xmlbuilder/-/xmlbuilder-14.0.0.tgz#876b5aec4f05ffd5feb97b0a871c855d16fbeb8c"
- integrity sha512-ts+B2rSe4fIckR6iquDjsKbQFK2NlUk6iG5nf14mDEyldgoc2nEKZ3jZWMPTxGQwVgToSjt6VGIho1H8/fNFTg==
-
-xmlbuilder@^15.1.1:
- version "15.1.1"
- resolved "https://registry.yarnpkg.com/xmlbuilder/-/xmlbuilder-15.1.1.tgz#9dcdce49eea66d8d10b42cae94a79c3c8d0c2ec5"
- integrity sha512-yMqGBqtXyeN1e3TGYvgNgDVZ3j84W4cwkOXQswghol6APgZWaff9lnbvN7MHYJOiXsvGPXtjTYJEiC9J2wv9Eg==
-
-xmlbuilder@~11.0.0:
- version "11.0.1"
- resolved "https://registry.yarnpkg.com/xmlbuilder/-/xmlbuilder-11.0.1.tgz#be9bae1c8a046e76b31127726347d0ad7002beb3"
- integrity sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==
-
y18n@^4.0.0:
version "4.0.3"
resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.3.tgz#b5f259c82cd6e336921efd7bfd8bf560de9eeedf"
diff --git a/ios/Voice/Voice.mm b/ios/Voice/Voice.mm
index e36949e0..082ce01e 100644
--- a/ios/Voice/Voice.mm
+++ b/ios/Voice/Voice.mm
@@ -29,8 +29,6 @@ @interface Voice ()
@implementation Voice {
}
-
-
///** Returns "YES" if no errors had occurred */
- (BOOL)setupAudioSession {
if ([self isHeadsetPluggedIn] || [self isHeadSetBluetooth]) {
@@ -68,7 +66,7 @@ - (BOOL)setupAudioSession {
return YES;
}
- - (BOOL)isHeadsetPluggedIn {
+- (BOOL)isHeadsetPluggedIn {
AVAudioSessionRouteDescription *route =
[[AVAudioSession sharedInstance] currentRoute];
for (AVAudioSessionPortDescription *desc in [route outputs]) {
@@ -79,7 +77,7 @@ - (BOOL)isHeadsetPluggedIn {
return NO;
}
- - (BOOL)isHeadSetBluetooth {
+- (BOOL)isHeadSetBluetooth {
NSArray *arrayInputs = [[AVAudioSession sharedInstance] availableInputs];
for (AVAudioSessionPortDescription *port in arrayInputs) {
if ([port.portType isEqualToString:AVAudioSessionPortBluetoothHFP]) {
@@ -164,11 +162,6 @@ - (void)setupAndTranscribeFile:(NSString *)filePath
self.speechRecognizer.delegate = self;
- [self sendEventWithName:@"onTranscriptionError"
- body:@{
- @"error" :
- @{@"code" : @"fake_error", @"message" : filePath}
- }];
// Set up recognition request
self.recognitionUrlRequest = [[SFSpeechURLRecognitionRequest alloc]
initWithURL:[NSURL fileURLWithPath:filePath]];
@@ -271,204 +264,208 @@ - (void)setupAndTranscribeFile:(NSString *)filePath
}
- (void)setupAndStartRecognizing:(NSString *)localeStr {
- self.audioSession = [AVAudioSession sharedInstance];
- self.priorAudioCategory = [self.audioSession category];
- // Tear down resources before starting speech recognition..
- [self teardown];
-
- self.sessionId = [[NSUUID UUID] UUIDString];
-
- NSLocale *locale = nil;
- if ([localeStr length] > 0) {
- locale = [NSLocale localeWithLocaleIdentifier:localeStr];
- }
-
- if (locale) {
- self.speechRecognizer = [[SFSpeechRecognizer alloc] initWithLocale:locale];
- } else {
- self.speechRecognizer = [[SFSpeechRecognizer alloc] init];
- }
-
- self.speechRecognizer.delegate = self;
-
- // Start audio session...
- if (![self setupAudioSession]) {
- [self teardown];
- return;
- }
-
- self.recognitionRequest =
- [[SFSpeechAudioBufferRecognitionRequest alloc] init];
- // Configure request so that results are returned before audio
- // recording is finished
- self.recognitionRequest.shouldReportPartialResults = YES;
-
- if (self.recognitionRequest == nil) {
- [self sendResult:@{@"code" : @"recognition_init"}:nil:nil:nil];
- [self teardown];
- return;
- }
-
- if (self.audioEngine == nil) {
- self.audioEngine = [[AVAudioEngine alloc] init];
- }
-
- @try {
- AVAudioInputNode *inputNode = self.audioEngine.inputNode;
- if (inputNode == nil) {
- [self sendResult:@{@"code" : @"input"}:nil:nil:nil];
- [self teardown];
- return;
- }
-
- [self sendEventWithName:@"onSpeechStart" body:nil];
-
- // A recognition task represents a speech recognition session.
- // We keep a reference to the task so that it can be cancelled.
- NSString *taskSessionId = self.sessionId;
- self.recognitionTask = [self.speechRecognizer
- recognitionTaskWithRequest:self.recognitionRequest
- resultHandler:^(
- SFSpeechRecognitionResult *_Nullable result,
- NSError *_Nullable error) {
- if (![taskSessionId isEqualToString:self.sessionId]) {
- // session ID has changed, so ignore any
- // capture results and error
- [self teardown];
- return;
- }
- if (error != nil) {
- NSString *errorMessage = [NSString
- stringWithFormat:@"%ld/%@", error.code,
- [error localizedDescription]];
- [self sendResult:@{
- @"code" : @"recognition_fail_ooo",
- @"message" : errorMessage
- }:nil:nil:nil];
- [self teardown];
- return;
- }
+ self.audioSession = [AVAudioSession sharedInstance];
+ self.priorAudioCategory = [self.audioSession category];
+ // Tear down resources before starting speech recognition..
+ [self teardown];
- // No result.
- if (result == nil) {
- [self sendEventWithName:@"onSpeechEnd" body:nil];
- [self teardown];
- return;
- }
+ self.sessionId = [[NSUUID UUID] UUIDString];
- BOOL isFinal = result.isFinal;
+ NSLocale *locale = nil;
+ if ([localeStr length] > 0) {
+ locale = [NSLocale localeWithLocaleIdentifier:localeStr];
+ }
- NSMutableArray *transcriptionDics = [NSMutableArray new];
- for (SFTranscription *transcription in result
- .transcriptions) {
- [transcriptionDics
- addObject:transcription.formattedString];
- }
+ if (locale) {
+ self.speechRecognizer = [[SFSpeechRecognizer alloc] initWithLocale:locale];
+ } else {
+ self.speechRecognizer = [[SFSpeechRecognizer alloc] init];
+ }
+
+ self.speechRecognizer.delegate = self;
+
+ // Start audio session...
+ if (![self setupAudioSession]) {
+ [self teardown];
+ return;
+ }
+
+ self.recognitionRequest =
+ [[SFSpeechAudioBufferRecognitionRequest alloc] init];
+ // Configure request so that results are returned before audio
+ // recording is finished
+ self.recognitionRequest.shouldReportPartialResults = YES;
+ // Set task hint for better end-of-speech detection (like dictation)
+ self.recognitionRequest.taskHint = SFSpeechRecognitionTaskHintDictation;
+ // Ensure continuous mode is off for auto-stop behavior
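+ // (with continuous == NO, the result handler below tears the session
+ // down as soon as a final result arrives, i.e. dictation-style auto-stop)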
+ self.continuous = NO;
+
+ if (self.recognitionRequest == nil) {
+ [self sendResult:@{@"code" : @"recognition_init"}:nil:nil:nil];
+ [self teardown];
+ return;
+ }
+
+ if (self.audioEngine == nil) {
+ self.audioEngine = [[AVAudioEngine alloc] init];
+ }
+
+ @try {
+ AVAudioInputNode *inputNode = self.audioEngine.inputNode;
+ if (inputNode == nil) {
+ [self sendResult:@{@"code" : @"input"}:nil:nil:nil];
+ [self teardown];
+ return;
+ }
+
+ [self sendEventWithName:@"onSpeechStart" body:nil];
+
+ // A recognition task represents a speech recognition session.
+ // We keep a reference to the task so that it can be cancelled.
+ NSString *taskSessionId = self.sessionId;
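+ // Capture the current session ID so the result handler can detect and
+ // ignore callbacks that belong to a superseded session.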
+ self.recognitionTask = [self.speechRecognizer
+ recognitionTaskWithRequest:self.recognitionRequest
+ resultHandler:^(
+ SFSpeechRecognitionResult *_Nullable result,
+ NSError *_Nullable error) {
+ if (![taskSessionId isEqualToString:self.sessionId]) {
+ // session ID has changed, so ignore any
+ // capture results and error
+ [self teardown];
+ return;
+ }
+ if (error != nil) {
+ NSString *errorMessage = [NSString
+ stringWithFormat:@"%ld/%@", error.code,
+ [error localizedDescription]];
+ [self sendResult:@{
+ @"code" : @"recognition_fail_ooo",
+ @"message" : errorMessage
+ }:nil:nil:nil];
+ [self teardown];
+ return;
+ }
- [self sendResult :nil :result.bestTranscription.formattedString :transcriptionDics :[NSNumber numberWithBool:isFinal]];
+ // No result.
+ if (result == nil) {
+ [self sendEventWithName:@"onSpeechEnd" body:nil];
+ [self teardown];
+ return;
+ }
- if (isFinal || self.recognitionTask.isCancelled ||
- self.recognitionTask.isFinishing) {
- [self sendEventWithName:@"onSpeechEnd" body:nil];
- if (!self.continuous) {
- [self teardown];
- }
- return;
+ BOOL isFinal = result.isFinal;
+
+ NSMutableArray *transcriptionDics = [NSMutableArray new];
+ for (SFTranscription *transcription in result
+ .transcriptions) {
+ [transcriptionDics
+ addObject:transcription.formattedString];
+ }
+
+ [self sendResult :nil :result.bestTranscription.formattedString :transcriptionDics :[NSNumber numberWithBool:isFinal]];
+
+ if (isFinal || self.recognitionTask.isCancelled ||
+ self.recognitionTask.isFinishing) {
+ [self sendEventWithName:@"onSpeechEnd" body:nil];
+ if (!self.continuous) {
+ [self teardown];
+ }
+ return;
+ }
+ }];
+
+ AVAudioFormat *recordingFormat = [inputNode outputFormatForBus:0];
+ AVAudioMixerNode *mixer = [[AVAudioMixerNode alloc] init];
+ [self.audioEngine attachNode:mixer];
+
+ // Start recording and append recording buffer to speech recognizer
+ @try {
+ [mixer
+ installTapOnBus:0
+ bufferSize:1024
+ format:recordingFormat
+ block:^(AVAudioPCMBuffer *_Nonnull buffer,
+ AVAudioTime *_Nonnull when) {
+ // Volume Level Metering
+ UInt32 inNumberFrames = buffer.frameLength;
+ float LEVEL_LOWPASS_TRIG = 0.5;
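+ // Smooth the per-buffer peak with an exponential moving average:
+ // smoothed = k * current + (1 - k) * previous, with k = 0.5, using
+ // -100 dB as the floor for silent buffers.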
+ if (buffer.format.channelCount > 0) {
+ Float32 *samples =
+ (Float32 *)buffer.floatChannelData[0];
+ Float32 avgValue = 0;
+
+ vDSP_maxmgv((Float32 *)samples, 1, &avgValue,
+ inNumberFrames);
+ self.averagePowerForChannel0 =
+ (LEVEL_LOWPASS_TRIG *
+ ((avgValue == 0) ? -100
+ : 20.0 * log10f(avgValue))) +
+ ((1 - LEVEL_LOWPASS_TRIG) *
+ self.averagePowerForChannel0);
+ self.averagePowerForChannel1 =
+ self.averagePowerForChannel0;
+ }
+
+ if (buffer.format.channelCount > 1) {
+ Float32 *samples =
+ (Float32 *)buffer.floatChannelData[1];
+ Float32 avgValue = 0;
+
+ vDSP_maxmgv((Float32 *)samples, 1, &avgValue,
+ inNumberFrames);
+ self.averagePowerForChannel1 =
+ (LEVEL_LOWPASS_TRIG *
+ ((avgValue == 0) ? -100
+ : 20.0 * log10f(avgValue))) +
+ ((1 - LEVEL_LOWPASS_TRIG) *
+ self.averagePowerForChannel1);
+ }
+ // Normalizing the Volume Value on scale of (0-10)
+ self.averagePowerForChannel1 =
+ [self _normalizedPowerLevelFromDecibels:
+ self.averagePowerForChannel1] *
+ 10;
+ NSNumber *value = [NSNumber
+ numberWithFloat:self.averagePowerForChannel1];
+ [self sendEventWithName:@"onSpeechVolumeChanged"
+ body:@{@"value" : value}];
+
+ // Todo: write recording buffer to file (if user
+ // opts in)
+ if (self.recognitionRequest != nil) {
+ [self.recognitionRequest appendAudioPCMBuffer:buffer];
}
}];
+ } @catch (NSException *exception) {
+ NSLog(@"[Error] - %@ %@", exception.name, exception.reason);
+ [self sendResult:@{
+ @"code" : @"start_recording",
+ @"message" : [exception reason]
+ }:nil:nil:nil];
+ [self teardown];
+ return;
+ } @finally {
+ }
- AVAudioFormat *recordingFormat = [inputNode outputFormatForBus:0];
- AVAudioMixerNode *mixer = [[AVAudioMixerNode alloc] init];
- [self.audioEngine attachNode:mixer];
-
- // Start recording and append recording buffer to speech recognizer
- @try {
- [mixer
- installTapOnBus:0
- bufferSize:1024
- format:recordingFormat
- block:^(AVAudioPCMBuffer *_Nonnull buffer,
- AVAudioTime *_Nonnull when) {
- // Volume Level Metering
- UInt32 inNumberFrames = buffer.frameLength;
- float LEVEL_LOWPASS_TRIG = 0.5;
- if (buffer.format.channelCount > 0) {
- Float32 *samples =
- (Float32 *)buffer.floatChannelData[0];
- Float32 avgValue = 0;
-
- vDSP_maxmgv((Float32 *)samples, 1, &avgValue,
- inNumberFrames);
- self.averagePowerForChannel0 =
- (LEVEL_LOWPASS_TRIG *
- ((avgValue == 0) ? -100
- : 20.0 * log10f(avgValue))) +
- ((1 - LEVEL_LOWPASS_TRIG) *
- self.averagePowerForChannel0);
- self.averagePowerForChannel1 =
- self.averagePowerForChannel0;
- }
-
- if (buffer.format.channelCount > 1) {
- Float32 *samples =
- (Float32 *)buffer.floatChannelData[1];
- Float32 avgValue = 0;
-
- vDSP_maxmgv((Float32 *)samples, 1, &avgValue,
- inNumberFrames);
- self.averagePowerForChannel1 =
- (LEVEL_LOWPASS_TRIG *
- ((avgValue == 0) ? -100
- : 20.0 * log10f(avgValue))) +
- ((1 - LEVEL_LOWPASS_TRIG) *
- self.averagePowerForChannel1);
- }
- // Normalizing the Volume Value on scale of (0-10)
- self.averagePowerForChannel1 =
- [self _normalizedPowerLevelFromDecibels:
- self.averagePowerForChannel1] *
- 10;
- NSNumber *value = [NSNumber
- numberWithFloat:self.averagePowerForChannel1];
- [self sendEventWithName:@"onSpeechVolumeChanged"
- body:@{@"value" : value}];
-
- // Todo: write recording buffer to file (if user
- // opts in)
- if (self.recognitionRequest != nil) {
- [self.recognitionRequest appendAudioPCMBuffer:buffer];
- }
- }];
- } @catch (NSException *exception) {
- NSLog(@"[Error] - %@ %@", exception.name, exception.reason);
- [self sendResult:@{
- @"code" : @"start_recording",
- @"message" : [exception reason]
- }:nil:nil:nil];
- [self teardown];
- return;
- } @finally {
- }
-
- [self.audioEngine connect:inputNode to:mixer format:recordingFormat];
- [self.audioEngine prepare];
- NSError *audioSessionError = nil;
- [self.audioEngine startAndReturnError:&audioSessionError];
- if (audioSessionError != nil) {
- [self sendResult:@{
- @"code" : @"audio",
- @"message" : [audioSessionError localizedDescription]
- }:nil:nil:nil];
- [self teardown];
- return;
- }
- } @catch (NSException *exception) {
- [self sendResult:@{
- @"code" : @"start_recording",
- @"message" : [exception reason]
- }:nil:nil:nil];
- return;
- }
+ [self.audioEngine connect:inputNode to:mixer format:recordingFormat];
+ [self.audioEngine prepare];
+ NSError *audioSessionError = nil;
+ [self.audioEngine startAndReturnError:&audioSessionError];
+ if (audioSessionError != nil) {
+ [self sendResult:@{
+ @"code" : @"audio",
+ @"message" : [audioSessionError localizedDescription]
+ }:nil:nil:nil];
+ [self teardown];
+ return;
+ }
+ } @catch (NSException *exception) {
+ [self sendResult:@{
+ @"code" : @"start_recording",
+ @"message" : [exception reason]
+ }:nil:nil:nil];
+ return;
+ }
}
- (CGFloat)_normalizedPowerLevelFromDecibels:(CGFloat)decibels {
@@ -557,66 +554,63 @@ - (void)speechRecognizer:(SFSpeechRecognizer *)speechRecognizer
RCT_EXPORT_METHOD(isSpeechAvailable : (RCTResponseSenderBlock)callback) {
[SFSpeechRecognizer
requestAuthorization:^(SFSpeechRecognizerAuthorizationStatus status) {
- switch (status) {
- case SFSpeechRecognizerAuthorizationStatusAuthorized:
+ switch (status) {
+ case SFSpeechRecognizerAuthorizationStatusAuthorized:
callback(@[ @true ]);
- break;
- default:
+ break;
+ default:
callback(@[ @false ]);
- }
- }];
+ }
+ }];
}
RCT_EXPORT_METHOD(isRecognizing : (RCTResponseSenderBlock)callback) {
- if (self.recognitionTask != nil) {
- switch (self.recognitionTask.state) {
- case SFSpeechRecognitionTaskStateRunning:
- callback(@[ @true ]);
- break;
- default:
- callback(@[ @false ]);
- }
- } else {
- callback(@[ @false ]);
- }
+ if (self.recognitionTask != nil) {
+ switch (self.recognitionTask.state) {
+ case SFSpeechRecognitionTaskStateRunning:
+ callback(@[ @true ]);
+ break;
+ default:
+ callback(@[ @false ]);
+ }
+ } else {
+ callback(@[ @false ]);
+ }
}
-RCT_EXPORT_METHOD(startSpeech
- : (NSString *)localeStr callback
- : (RCTResponseSenderBlock)callback) {
- if (self.recognitionTask != nil) {
- [self sendResult:RCTMakeError(@"Speech recognition already started!", nil,
- nil):nil:nil:nil];
- return;
- }
-
- [SFSpeechRecognizer requestAuthorization:^(
- SFSpeechRecognizerAuthorizationStatus status) {
- switch (status) {
- case SFSpeechRecognizerAuthorizationStatusNotDetermined:
- [self sendResult:RCTMakeError(@"Speech recognition not yet authorized",
- nil, nil):nil:nil:nil];
- break;
- case SFSpeechRecognizerAuthorizationStatusDenied:
- [self sendResult:RCTMakeError(@"User denied access to speech recognition",
- nil, nil):nil:nil:nil];
- break;
- case SFSpeechRecognizerAuthorizationStatusRestricted:
- [self sendResult:RCTMakeError(
- @"Speech recognition restricted on this device", nil,
- nil):nil:nil:nil];
- break;
- case SFSpeechRecognizerAuthorizationStatusAuthorized:
- [self setupAndStartRecognizing:localeStr];
- break;
- }
- }];
- callback(@[ @false ]);
+RCT_EXPORT_METHOD(startSpeech : (NSString *)localeStr
+ callback : (RCTResponseSenderBlock)callback) {
+ if (self.recognitionTask != nil) {
+ [self sendResult:RCTMakeError(@"Speech recognition already started!", nil,
+ nil):nil:nil:nil];
+ return;
+ }
+
+ [SFSpeechRecognizer requestAuthorization:^(
+ SFSpeechRecognizerAuthorizationStatus status) {
+ switch (status) {
+ case SFSpeechRecognizerAuthorizationStatusNotDetermined:
+ [self sendResult:RCTMakeError(@"Speech recognition not yet authorized",
+ nil, nil):nil:nil:nil];
+ break;
+ case SFSpeechRecognizerAuthorizationStatusDenied:
+ [self sendResult:RCTMakeError(@"User denied access to speech recognition",
+ nil, nil):nil:nil:nil];
+ break;
+ case SFSpeechRecognizerAuthorizationStatusRestricted:
+ [self sendResult:RCTMakeError(
+ @"Speech recognition restricted on this device", nil,
+ nil):nil:nil:nil];
+ break;
+ case SFSpeechRecognizerAuthorizationStatusAuthorized:
+ [self setupAndStartRecognizing:localeStr];
+ break;
+ }
+ }];
+ callback(@[ @false ]);
}
-RCT_EXPORT_METHOD(startTranscription
- : (NSString *)filePath withLocaleStr
- : (NSString *)localeStr callback
- : (RCTResponseSenderBlock)callback) {
+RCT_EXPORT_METHOD(startTranscription : (NSString *)filePath
+ withLocaleStr : (NSString *)localeStr
+ callback : (RCTResponseSenderBlock)callback) {
if (self.recognitionTask != nil) {
[self sendResult:RCTMakeError(@"Speech recognition already started!", nil,
nil):nil:nil:nil];
@@ -646,19 +640,16 @@ - (void)speechRecognizer:(SFSpeechRecognizer *)speechRecognizer
}];
callback(@[ @false ]);
}
-
-
+ (BOOL)requiresMainQueueSetup {
- return YES;
+ return YES;
}
// Don't compile this code when we build for the old architecture.
#ifdef RCT_NEW_ARCH_ENABLED
- (std::shared_ptr<facebook::react::TurboModule>)getTurboModule:
- (const facebook::react::ObjCTurboModule::InitParams &)params
-{
- return std::make_shared<facebook::react::NativeVoiceIOSSpecJSI>(params);
+ (const facebook::react::ObjCTurboModule::InitParams &)params {
+ return std::make_shared<facebook::react::NativeVoiceIOSSpecJSI>(params);
}
#endif
@@ -668,5 +659,4 @@ - (dispatch_queue_t)methodQueue {
RCT_EXPORT_MODULE()
-
@end
diff --git a/package.json b/package.json
index 6576384a..5f321df8 100644
--- a/package.json
+++ b/package.json
@@ -1,10 +1,10 @@
{
- "name": "@react-native-voice/voice",
- "description": "React Native Native Voice library for iOS and Android",
- "version": "3.2.4",
- "author": "Sam Wenke ",
+ "name": "@dev-amirzubair/react-native-voice",
+ "description": "React Native Voice library for iOS and Android - Fork with New Architecture, Bridgeless mode, and React Native 0.76+ support",
+ "version": "1.0.1",
+ "author": "Amir Zubair ",
"private": false,
- "homepage": "https://github.com/react-native-voice/voice",
+ "homepage": "https://github.com/dev-amirzubair/voice",
"devDependencies": {
"@react-native-community/eslint-config": "^3.2.0",
"@semantic-release/git": "^10.0.1",
@@ -22,7 +22,13 @@
"ios",
"react-native",
"speech",
- "voice"
+ "voice",
+ "speech-to-text",
+ "speech-recognition",
+ "new-architecture",
+ "turbomodules",
+ "fabric",
+ "bridgeless"
],
"license": "MIT",
"source": "src/index.ts",
@@ -39,7 +45,7 @@
},
"repository": {
"type": "git",
- "url": "git://github.com/react-native-voice/voice.git"
+ "url": "git://github.com/dev-amirzubair/voice.git"
},
"files": [
"src",
@@ -58,7 +64,7 @@
"android": "yarn --cwd example android",
"prepare": "yarn build && yarn build:plugin",
"build": "tsc",
- "dev-sync": "cp -r ./dist example/node_modules/@react-native-voice/voice",
+ "dev-sync": "mkdir -p example/node_modules/@react-native-voice/voice/dist && cp -r ./dist/* example/node_modules/@react-native-voice/voice/dist/ && cp -r ./android example/node_modules/@react-native-voice/voice/ && cp -r ./ios example/node_modules/@react-native-voice/voice/ && cp -r ./src example/node_modules/@react-native-voice/voice/ && cp package.json react-native-voice.podspec example/node_modules/@react-native-voice/voice/",
"type-check": "tsc -noEmit",
"build:plugin": "tsc --build plugin",
"lint:plugin": "eslint plugin/src/*"
diff --git a/react-native-voice.podspec b/react-native-voice.podspec
index ffcdadee..087d3480 100644
--- a/react-native-voice.podspec
+++ b/react-native-voice.podspec
@@ -16,7 +16,7 @@ Pod::Spec.new do |s|
# Link with Speech framework
s.frameworks = ['Speech']
- s.source = { :git => "https://github.com/react-native-voice/voice.git" }
+ s.source = { :git => "https://github.com/dev-amirzubair/voice.git" }
s.source_files = "ios/**/*.{h,m,mm,cpp}"
diff --git a/src/NativeVoiceAndroid.ts b/src/NativeVoiceAndroid.ts
index f2ac10e6..ad16fb67 100644
--- a/src/NativeVoiceAndroid.ts
+++ b/src/NativeVoiceAndroid.ts
@@ -1,17 +1,22 @@
import type { TurboModule } from 'react-native';
import { TurboModuleRegistry } from 'react-native';
-type SpeechType = {
- EXTRA_LANGUAGE_MODEL: string;
- EXTRA_MAX_RESULTS: string;
- EXTRA_PARTIAL_RESULTS: string;
- REQUEST_PERMISSIONS_AUTO: string;
- RECOGNIZER_ENGINE: string;
+
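+/**
+ * Options forwarded to Android's SpeechRecognizer. Most keys map to
+ * RecognizerIntent extras (the EXTRA_SPEECH_INPUT_* silence values are
+ * in milliseconds); REQUEST_PERMISSIONS_AUTO and RECOGNIZER_ENGINE are
+ * interpreted by this library rather than by the platform.
+ */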
+export type SpeechOptions = {
+ EXTRA_LANGUAGE_MODEL?: string;
+ EXTRA_MAX_RESULTS?: number;
+ EXTRA_PARTIAL_RESULTS?: boolean;
+ REQUEST_PERMISSIONS_AUTO?: boolean;
+ RECOGNIZER_ENGINE?: string;
+ EXTRA_SPEECH_INPUT_MINIMUM_LENGTH_MILLIS?: number;
+ EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS?: number;
+ EXTRA_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS?: number;
};
+
export interface Spec extends TurboModule {
destroySpeech: (callback: (error: string) => void) => void;
startSpeech: (
locale: string,
- opts: SpeechType,
+ opts: SpeechOptions,
callback: (error: string) => void
) => void;
stopSpeech: (callback: (error: string) => void) => void;
@@ -20,7 +25,12 @@ export interface Spec extends TurboModule {
callback: (isAvailable: boolean, error: string) => void
) => void;
getSpeechRecognitionServices(): Promise<string[]>;
- isRecognizing: (callback: (Recognizing: boolean) => void) => void;
+ isRecognizing: (callback: (isRecognizing: boolean) => void) => void;
+ /**
+ * Add an event listener for speech recognition events
+ * Supported events: 'onSpeechStart', 'onSpeechRecognized', 'onSpeechEnd', 'onSpeechError', 'onSpeechResults', 'onSpeechPartialResults', 'onSpeechVolumeChanged'
+ * Note: Android does not support transcription events
+ */
addListener: (eventType: string) => void;
removeListeners: (count: number) => void;
}
diff --git a/src/NativeVoiceIOS.ts b/src/NativeVoiceIOS.ts
index 50b37a3e..78c13c24 100644
--- a/src/NativeVoiceIOS.ts
+++ b/src/NativeVoiceIOS.ts
@@ -5,6 +5,7 @@ export interface Spec extends TurboModule {
destroySpeech: (callback: (error: string) => void) => void;
startSpeech: (locale: string, callback: (error: string) => void) => void;
startTranscription: (
+ filePath: string,
locale: string,
callback: (error: string) => void,
) => void;
@@ -15,10 +16,15 @@ export interface Spec extends TurboModule {
isSpeechAvailable: (
callback: (isAvailable: boolean, error: string) => void,
) => void;
- isRecognizing: (callback: (Recognizing: boolean) => void) => void;
+ isRecognizing: (callback: (isRecognizing: boolean) => void) => void;
+ /**
+ * Add an event listener for speech recognition events
+ * Supported events: 'onSpeechStart', 'onSpeechRecognized', 'onSpeechEnd', 'onSpeechError', 'onSpeechResults', 'onSpeechPartialResults', 'onSpeechVolumeChanged', 'onTranscriptionStart', 'onTranscriptionEnd', 'onTranscriptionError', 'onTranscriptionResults'
+ */
addListener: (eventType: string) => void;
removeListeners: (count: number) => void;
destroyTranscription: (callback: (error: string) => void) => void;
}
-export default TurboModuleRegistry.getEnforcing<Spec>('Voice');
+// Use get() instead of getEnforcing() to allow graceful fallback
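+// (getEnforcing throws as soon as the module is missing; get returns null,
+// letting index.ts fall back to NativeModules.Voice)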
+export default TurboModuleRegistry.get<Spec>('Voice');
diff --git a/src/VoiceModuleTypes.ts b/src/VoiceModuleTypes.ts
index 53e0160c..45964700 100644
--- a/src/VoiceModuleTypes.ts
+++ b/src/VoiceModuleTypes.ts
@@ -31,8 +31,14 @@ export type SpeechResultsEvent = {
value?: string[];
};
+export type TranscriptionSegment = {
+ transcription?: string;
+ timestamp?: number;
+ duration?: number;
+};
+
export type TranscriptionResultsEvent = {
- segments?: string[];
+ segments?: TranscriptionSegment[];
transcription?: string;
isFinal?: boolean;
};
diff --git a/src/index.ts b/src/index.ts
index 71ffd04f..b0aa5c6f 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1,10 +1,10 @@
import {
NativeModules,
+ DeviceEventEmitter,
NativeEventEmitter,
Platform,
type EventSubscription,
} from 'react-native';
-import invariant from 'invariant';
import {
type SpeechEvents,
type TranscriptionEvents,
@@ -26,29 +26,61 @@ const LINKING_ERROR =
'- You rebuilt the app after installing the package\n' +
'- You are not using Expo Go\n';
-//@ts-expect-error
+//@ts-expect-error - Check if TurboModules are enabled (new architecture)
const isTurboModuleEnabled = global.__turboModuleProxy != null;
-const VoiceNativeModule = isTurboModuleEnabled
- ? Platform.OS === 'android'
- ? require('./NativeVoiceAndroid').default
- : require('./NativeVoiceIOS').default
- : NativeModules.Voice;
+//@ts-expect-error - Check if Bridgeless mode is enabled
+const isBridgelessEnabled = global.RN$Bridgeless === true;
-const Voice = VoiceNativeModule
- ? VoiceNativeModule
- : new Proxy(
- {},
- {
- get() {
- throw new Error(LINKING_ERROR);
- },
- },
- );
+// Try to get the native module - with fallback for Bridgeless mode
+const getVoiceModule = () => {
+ // Try TurboModule first if enabled
+ if (isTurboModuleEnabled) {
+ try {
+ const turboModule = Platform.OS === 'android'
+ ? require('./NativeVoiceAndroid').default
+ : require('./NativeVoiceIOS').default;
+ if (turboModule) {
+ return turboModule;
+ }
+ } catch (e) {
+ // TurboModule not available, fall through to NativeModules
+ }
+ }
+
+ // Fallback to NativeModules (works in both Bridge and Bridgeless mode)
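+ // (in Bridgeless mode NativeModules is backed by the TurboModule interop
+ // layer, so this lookup still resolves)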
+ return NativeModules.Voice;
+};
+
+const Voice = getVoiceModule() || new Proxy(
+ {},
+ {
+ get() {
+ throw new Error(LINKING_ERROR);
+ },
+ },
+);
-// NativeEventEmitter is only availabe on React Native platforms, so this conditional is used to avoid import conflicts in the browser/server
-const voiceEmitter =
- Platform.OS !== 'web' ? new NativeEventEmitter(Voice) : null;
+// Platform-specific event emitter setup:
+// - iOS: Always uses RCTEventEmitter (module-specific), needs NativeEventEmitter
+// - Android: Uses RCTDeviceEventEmitter (global), needs DeviceEventEmitter
+const voiceEmitter = (() => {
+ if (Platform.OS === 'web') {
+ return null;
+ }
+
+ // iOS always uses NativeEventEmitter with the Voice module
+ if (Platform.OS === 'ios') {
+ try {
+ return Voice ? new NativeEventEmitter(Voice) : DeviceEventEmitter;
+ } catch (e) {
+ return DeviceEventEmitter;
+ }
+ }
+
+ // Android uses DeviceEventEmitter (global event bus)
+ return DeviceEventEmitter;
+})();
type SpeechEvent = keyof SpeechEvents;
type TranscriptionEvent = keyof TranscriptionEvents;
@@ -59,7 +91,7 @@ class RCTVoice {
constructor() {
this._loaded = false;
- this._listeners = JSON.parse(JSON.stringify([]));
+ this._listeners = [];
this._events = {
onSpeechStart: () => {},
onSpeechRecognized: () => {},
@@ -122,16 +154,9 @@ class RCTVoice {
}
start(locale: string, options = {}) {
- if (
- !this._loaded &&
- this._listeners.length === 0 &&
- voiceEmitter !== null
- ) {
- this._listeners = (Object.keys(this._events) as SpeechEvent[]).map(
- (key: SpeechEvent) => voiceEmitter.addListener(key, this._events[key]),
- );
- }
-
+ // Ensure listeners are set up BEFORE starting recognition
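+ // (events emitted before JS subscribes are dropped, so subscribing late
+ // would lose early events such as onSpeechStart)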
+ this._setupListeners();
+
return new Promise<void>((resolve, reject) => {
const callback = (error: string) => {
if (error) {
@@ -140,6 +165,7 @@ class RCTVoice {
resolve();
}
};
+
if (Platform.OS === 'android') {
Voice.startSpeech(
locale,
@@ -266,13 +292,13 @@ class RCTVoice {
/**
* (Android) Get a list of the speech recognition engines available on the device
* */
- getSpeechRecognitionServices() {
+ getSpeechRecognitionServices(): Promise<string[]> {
if (Platform.OS !== 'android') {
- invariant(
- Voice,
- 'Speech recognition services can be queried for only on Android',
+ return Promise.reject(
+ new Error(
+ 'Speech recognition services can only be queried on Android',
+ ),
);
- return;
}
return Voice.getSpeechRecognitionServices();
@@ -286,44 +312,101 @@ class RCTVoice {
set onSpeechStart(fn: (e: SpeechStartEvent) => void) {
this._events.onSpeechStart = fn;
+ this._setupListeners();
}
set onTranscriptionStart(fn: (e: TranscriptionStartEvent) => void) {
this._events.onTranscriptionStart = fn;
+ this._setupListeners();
}
set onSpeechRecognized(fn: (e: SpeechRecognizedEvent) => void) {
this._events.onSpeechRecognized = fn;
+ this._setupListeners();
}
set onSpeechEnd(fn: (e: SpeechEndEvent) => void) {
this._events.onSpeechEnd = fn;
+ this._setupListeners();
}
set onTranscriptionEnd(fn: (e: SpeechEndEvent) => void) {
this._events.onTranscriptionEnd = fn;
+ this._setupListeners();
}
set onSpeechError(fn: (e: SpeechErrorEvent) => void) {
this._events.onSpeechError = fn;
+ this._setupListeners();
}
set onTranscriptionError(fn: (e: TranscriptionErrorEvent) => void) {
this._events.onTranscriptionError = fn;
+ this._setupListeners();
}
set onSpeechResults(fn: (e: SpeechResultsEvent) => void) {
this._events.onSpeechResults = fn;
+ this._setupListeners();
}
set onTranscriptionResults(fn: (e: TranscriptionResultsEvent) => void) {
this._events.onTranscriptionResults = fn;
+ this._setupListeners();
}
set onSpeechPartialResults(fn: (e: SpeechResultsEvent) => void) {
this._events.onSpeechPartialResults = fn;
+ this._setupListeners();
}
set onSpeechVolumeChanged(fn: (e: SpeechVolumeChangeEvent) => void) {
this._events.onSpeechVolumeChanged = fn;
+ this._setupListeners();
+ }
+
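+ /**
+ * (Re)attaches a native subscription for every handler in _events.
+ * Called from each event setter and from start(), so subscriptions
+ * exist before the native side begins emitting.
+ */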
+ private _setupListeners() {
+ if (voiceEmitter === null) {
+ return;
+ }
+
+ // Remove existing listeners before setting up new ones
+ if (this._listeners.length > 0) {
+ this._listeners.forEach(listener => {
+ try {
+ listener.remove();
+ } catch (e) {
+ // Ignore errors when removing listeners
+ }
+ });
+ this._listeners = [];
+ }
+
+ // Set up listeners for all events
+ const newListeners: EventSubscription[] = [];
+
+ (Object.keys(this._events) as SpeechEvent[]).forEach((key: SpeechEvent) => {
+ const handler = this._events[key];
+
+ if (!handler || typeof handler !== 'function') {
+ return;
+ }
+
+ const currentHandler = handler;
+
+ const listener = voiceEmitter!.addListener(key, (event: any) => {
+ if (currentHandler) {
+ try {
+ currentHandler(event);
+ } catch (error) {
+ // Swallow handler exceptions so one faulty callback cannot
+ // break delivery of subsequent events
+ }
+ }
+ });
+
+ newListeners.push(listener);
+ });
+
+ this._listeners = newListeners;
+ this._loaded = true;
}
}