From cb4bdbb29145780bacc6263649d7882167a1169a Mon Sep 17 00:00:00 2001
From: Tyler Han
Date: Tue, 23 Feb 2021 14:08:41 -0500
Subject: [PATCH] fix: send intent (CORE-4946) (#35)

* feat: intent request

* chore: full test coverage

* chore: remove unused

* chore: section
---
 docs/advanced-usage.md                |   3 +-
 lib/RuntimeClient/index.ts            |  13 +++
 tests/lib/Context/fixtures.ts         |  19 ++++-
 tests/lib/RuntimeClient/index.unit.ts | 109 +++++++++++++++++---------
 4 files changed, 104 insertions(+), 40 deletions(-)

diff --git a/docs/advanced-usage.md b/docs/advanced-usage.md
index d86ab95..6493b00 100644
--- a/docs/advanced-usage.md
+++ b/docs/advanced-usage.md
@@ -92,11 +92,12 @@ Different interaction methods have different side-effects on the conversation session
 
 1. `.start()` - Starts the conversation session and runs the application until it requests user input, at which point, the method returns the current `context`. If this is called while a conversation session is ongoing, then it starts a new conversation session from the beginning.
 2. `.sendText(userInput)` - Advances the conversation session based on the user's input and then runs the application until it requests user input, at which point, the method returns the current `context`.
+3. `.sendIntent(intentName, entities)` - Advances the conversation session based on an intent being invoked - make sure that the `intentName` exists in the interaction model of your Voiceflow project. This bypasses NLP/NLU resolution and is useful for explicitly triggering certain conversation paths. The method returns the current `context`.
 
 Now, only certain interaction methods are allowed to be called at certain points in a conversation session.
 
 1. `.start()` is callable at any time.
-2. `.sendText()` is callable only if the `RuntimeClient` contains some ongoing conversation session. That is, `runtimeClient.getContext().isEnding()` is `false`. If you call `.sendText()` when the return of the aforementioned `.isEnding()` call is `true`, then calling `.sendText()` throws an exception.
+2. `.sendText()` and `.sendIntent()` are callable only while the `RuntimeClient` has an ongoing conversation session, that is, while `runtimeClient.getContext().isEnding()` is `false`. Calling either method when `.isEnding()` returns `true` throws an exception.
 
 Thus, if `runtimeClient.getContext().isEnding()` is `true`, the only valid method you may call is `.start()` to restart the conversation session from the beginning.
 
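For illustration, here is a minimal usage sketch of the `.sendIntent()` call documented above. It assumes `runtimeClient` is an already-constructed `RuntimeClient` and that an `order_pizza` intent with a `size` entity exists in the project's interaction model (both names are hypothetical):

```ts
// A minimal sketch, assuming `runtimeClient` is a configured RuntimeClient and
// that the `order_pizza` intent and `size` entity (hypothetical names) are
// defined in the Voiceflow project's interaction model.
const context = await runtimeClient.start();

if (!context.isEnding()) {
  // Bypasses NLP/NLU resolution and triggers the intent's conversation path directly.
  const next = await runtimeClient.sendIntent('order_pizza', [{ name: 'size', value: 'large' }]);
  console.log(next.isEnding());
}
```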
diff --git a/lib/RuntimeClient/index.ts b/lib/RuntimeClient/index.ts
index 6af5194..f83de22 100644
--- a/lib/RuntimeClient/index.ts
+++ b/lib/RuntimeClient/index.ts
@@ -46,6 +46,19 @@ export class RuntimeClient<S extends Record<string, any> = Record<string, any>>
     return this.sendRequest({ type: RequestType.TEXT, payload: userInput });
   }
 
+  async sendIntent(
+    name: string,
+    entities: {
+      name: string;
+      value: string;
+      query?: string;
+    }[] = [],
+    query = '',
+    confidence?: number
+  ): Promise<Context<S>> {
+    return this.sendRequest({ type: RequestType.INTENT, payload: { intent: { name }, entities, query, confidence } });
+  }
+
   async sendRequest(request: GeneralRequest) {
     if (this.context.isEnding()) {
       throw new VFClientError('RuntimeClient.sendText() was called but the conversation has ended');
diff --git a/tests/lib/Context/fixtures.ts b/tests/lib/Context/fixtures.ts
index 33eeb90..a4225f1 100644
--- a/tests/lib/Context/fixtures.ts
+++ b/tests/lib/Context/fixtures.ts
@@ -1,4 +1,4 @@
-import { RequestType } from '@voiceflow/general-types';
+import { IntentRequest, RequestType } from '@voiceflow/general-types';
 import { State } from '@voiceflow/runtime';
 import _ from 'lodash';
 
@@ -67,6 +67,12 @@ export const START_RESPONSE_BODY_WITH_NO_CHOICES = {
 };
 
 export const USER_RESPONSE = 'This is what the user says in response to the voice assistant';
+export const INTENT_RESPONSE: IntentRequest['payload'] = {
+  intent: { name: 'anyIntent' },
+  entities: [{ name: 'foo', value: 'bar' }],
+  query: USER_RESPONSE,
+  confidence: 1,
+};
 
 export const SEND_TEXT_REQUEST_BODY: RequestContext = {
   state: VF_APP_NEXT_STATE_1,
@@ -79,6 +85,17 @@ export const SEND_TEXT_REQUEST_BODY: RequestContext = {
   },
 };
 
+export const SEND_INTENT_REQUEST_BODY: RequestContext = {
+  state: VF_APP_NEXT_STATE_1,
+  request: {
+    type: RequestType.INTENT,
+    payload: INTENT_RESPONSE,
+  },
+  config: {
+    tts: false,
+  },
+};
+
 export const SEND_TEXT_REQUEST_BODY_TTS_ON: RequestContext = {
   state: VF_APP_NEXT_STATE_1,
   request: {
diff --git a/tests/lib/RuntimeClient/index.unit.ts b/tests/lib/RuntimeClient/index.unit.ts
index 32d81f1..0182c52 100644
--- a/tests/lib/RuntimeClient/index.unit.ts
+++ b/tests/lib/RuntimeClient/index.unit.ts
@@ -4,13 +4,16 @@ import _ from 'lodash';
 import sinon from 'sinon';
 
 import RuntimeClient from '@/lib/RuntimeClient';
-import { DataConfig, TraceType, TRACE_EVENT } from '@/lib/types';
+import { DataConfig, TRACE_EVENT, TraceType } from '@/lib/types';
+import { makeTraceProcessor } from '@/lib/Utils/makeTraceProcessor';
 
 import {
   CHOICE_TRACE,
   CHOICES_1,
   CHOICES_2,
   CHOICES_3,
+  INTENT_RESPONSE,
+  SEND_INTENT_REQUEST_BODY,
   SEND_TEXT_REQUEST_BODY,
   SEND_TEXT_REQUEST_BODY_TTS_ON,
   SEND_TEXT_RESPONSE_BODY,
@@ -23,7 +26,6 @@ import {
   VF_APP_INITIAL_STATE,
 } from '../Context/fixtures';
 import { AUDIO_TRACE, BLOCK_TRACE, DEBUG_TRACE, END_TRACE, FLOW_TRACE, SPEAK_TRACE } from '../fixtures';
-import { makeTraceProcessor } from '@/lib/Utils/makeTraceProcessor';
 
 chai.use(chaiAsPromise);
 
@@ -107,6 +109,48 @@ describe('RuntimeClient', () => {
     expect(data.toJSON()).to.eql(SEND_TEXT_RESPONSE_BODY);
   });
 
+  it('sendIntent', async () => {
+    const { agent, client } = createRuntimeClient();
+
+    client.interact.resolves(START_RESPONSE_BODY);
+
+    await agent.start();
+
+    client.interact.resolves(SEND_TEXT_RESPONSE_BODY);
+
+    const data = await agent.sendIntent(INTENT_RESPONSE.intent.name, INTENT_RESPONSE.entities, INTENT_RESPONSE.query, INTENT_RESPONSE.confidence);
+
+    expect(client.interact.callCount).to.eql(2);
+    expect(client.interact.args[1]).to.eql([SEND_INTENT_REQUEST_BODY]);
+
+    expect(data.toJSON()).to.eql(SEND_TEXT_RESPONSE_BODY);
+  });
+
+  it('sendIntent, empty', async () => {
+    const { agent, client } = createRuntimeClient();
+
+    client.interact.resolves(START_RESPONSE_BODY);
+
+    await agent.start();
+
+    client.interact.resolves(SEND_TEXT_RESPONSE_BODY);
+
+    const data = await agent.sendIntent(INTENT_RESPONSE.intent.name);
+
+    expect(client.interact.callCount).to.eql(2);
+    expect(client.interact.args[1]).to.eql([
+      {
+        ...SEND_INTENT_REQUEST_BODY,
+        request: {
+          ...SEND_INTENT_REQUEST_BODY.request,
+          payload: { intent: { name: INTENT_RESPONSE.intent.name }, entities: [], query: '', confidence: undefined },
+        },
+      },
+    ]);
+
+    expect(data.toJSON()).to.eql(SEND_TEXT_RESPONSE_BODY);
+  });
+
   it('sendText, empty', async () => {
     const { agent, client } = createRuntimeClient();
@@ -269,10 +313,10 @@ describe('RuntimeClient', () => {
   describe('events', () => {
     it('on', async () => {
       const { agent, client } = createRuntimeClient();
-      
+
       const result1: any[] = [];
       const result2: any[] = [];
-      
+
       agent.on(TraceType.SPEAK, (trace, context) => {
         result1.push(trace, context);
       });
@@ -284,16 +328,20 @@
 
       const context = await agent.start();
 
-      expect(result1).to.eql([
-        SPEAK_TRACE, context,
-      ]);
+      expect(result1).to.eql([SPEAK_TRACE, context]);
 
       expect(result2).to.eql([
-        SPEAK_TRACE, context,
-        BLOCK_TRACE, context,
-        FLOW_TRACE, context,
-        AUDIO_TRACE, context,
-        DEBUG_TRACE, context,
-        CHOICE_TRACE, context
+        SPEAK_TRACE,
+        context,
+        BLOCK_TRACE,
+        context,
+        FLOW_TRACE,
+        context,
+        AUDIO_TRACE,
+        context,
+        DEBUG_TRACE,
+        context,
+        CHOICE_TRACE,
+        context,
       ]);
     });
@@ -304,7 +352,7 @@
 
       const callback = () => {
         agent.on(BAD_TRACE_TYPE, () => {});
-      }
+      };
 
       expect(callback).to.throw();
     });
@@ -314,11 +362,11 @@
       const results: any = {};
       Object.keys(TraceType)
-        .map(trace => trace.toLowerCase())
+        .map((trace) => trace.toLowerCase())
        .forEach((trace) => {
           results[trace] = [];
         });
-      
+
       const insertToResults = (trace: any, context: any) => {
         results[trace.type].push(trace, context);
       };
@@ -340,36 +388,21 @@
 
       const context2 = await agent.sendText('some nonsense');
 
-      expect(results[TraceType.SPEAK]).to.eql([
-        SPEAK_TRACE, context1,
-        SPEAK_TRACE, context2
-      ]);
+      expect(results[TraceType.SPEAK]).to.eql([SPEAK_TRACE, context1, SPEAK_TRACE, context2]);
 
       expect(results[TraceType.VISUAL]).to.eql([]);
 
-      expect(results[TraceType.FLOW]).to.eql([
-        FLOW_TRACE, context1
-      ]);
+      expect(results[TraceType.FLOW]).to.eql([FLOW_TRACE, context1]);
 
-      expect(results[TraceType.END]).to.eql([
-        END_TRACE, context2
-      ]);
+      expect(results[TraceType.END]).to.eql([END_TRACE, context2]);
 
-      expect(results[TraceType.DEBUG]).to.eql([
-        DEBUG_TRACE, context1
-      ]);
+      expect(results[TraceType.DEBUG]).to.eql([DEBUG_TRACE, context1]);
 
-      expect(results[TraceType.CHOICE]).to.eql([
-        CHOICE_TRACE, context1
-      ]);
+      expect(results[TraceType.CHOICE]).to.eql([CHOICE_TRACE, context1]);
 
-      expect(results[TraceType.BLOCK]).to.eql([
-        BLOCK_TRACE, context1
-      ]);
+      expect(results[TraceType.BLOCK]).to.eql([BLOCK_TRACE, context1]);
 
-      expect(results[TraceType.AUDIO]).to.eql([
-        AUDIO_TRACE, context1
-      ]);
+      expect(results[TraceType.AUDIO]).to.eql([AUDIO_TRACE, context1]);
     });
   });
 });
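As the `sendIntent, empty` test pins down, arguments omitted from `.sendIntent()` are defaulted before the request is sent. As a sketch, a bare call such as `runtimeClient.sendIntent('anyIntent')` produces a request body of the following shape, using `RequestType` from `@voiceflow/general-types` as in the fixtures above:

```ts
import { RequestType } from '@voiceflow/general-types';

// Shape of the request body produced by runtimeClient.sendIntent('anyIntent')
// with no other arguments, per the sendIntent implementation and the
// 'sendIntent, empty' expectation above.
const request = {
  type: RequestType.INTENT,
  payload: {
    intent: { name: 'anyIntent' },
    entities: [], // defaulted: no entities supplied
    query: '', // defaulted: no transcript supplied
    confidence: undefined, // defaulted: confidence not supplied
  },
};
```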