Skip to content
This repository has been archived by the owner on Aug 14, 2021. It is now read-only.

Commit

Permalink
fix: send intent (CORE-4946) (#35)
Browse files Browse the repository at this point in the history
* feat: intent request

* chore: full test coverage

* chore: remove unused

* chore: section
  • Loading branch information
DecathectZero authored Feb 23, 2021
1 parent 2af076f commit cb4bdbb
Show file tree
Hide file tree
Showing 4 changed files with 104 additions and 40 deletions.
3 changes: 2 additions & 1 deletion docs/advanced-usage.md
Original file line number Diff line number Diff line change
Expand Up @@ -92,11 +92,12 @@ Different interaction method have different side-effects on the conversation ses

1. `.start()` - Starts the conversation session and runs the application until it requests user input, at which point, the method returns the current `context`. If this is called while a conversation session is ongoing, then it starts a new conversation session from the beginning.
2. `.sendText(userInput)` - Advances the conversation session based on the user's input and then runs the application until it requests user input, at which point, the method returns the current `context`.
3. `.sendIntent(intentName, entities)` - Advances the conversation session based on an intent being invoked - make sure that the `intentName` exists in the interaction model on your Voiceflow project. This bypasses NLP/NLU resolution, and is useful in explicitly triggering certain conversation paths. The method returns the current `context`.

Now, only certain interaction methods are allowed to be called at certain points in conversation session.

1. `.start()` is callable any time.
2. `.sendText()` is callable only if the `RuntimeClient` contains some ongoing conversation session. That is, `runtimeClient.getContext().isEnding()` is `false`. If you call `.sendText()` when the return of the aforementioned `.isEnding()` call is `true`, then calling `.sendText()` throws an exception.
2. `.sendText()` and `.sendIntent()` are callable only if the `RuntimeClient` contains some ongoing conversation session, that is, only while `runtimeClient.getContext().isEnding()` is `false`. Calling `.sendText()` or `.sendIntent()` when `.isEnding()` is `true` throws an exception.

Thus, if `runtimeClient.getContext().isEnding()` is `true`, the only valid method you may call is `.start()` to restart the conversation session from the beginning.

Expand Down
13 changes: 13 additions & 0 deletions lib/RuntimeClient/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,19 @@ export class RuntimeClient<V extends Record<string, any> = Record<string, any>>
return this.sendRequest({ type: RequestType.TEXT, payload: userInput });
}

/**
 * Advances the conversation session by invoking a named intent directly,
 * bypassing NLP/NLU resolution.
 *
 * @param name - intent to trigger; must exist in the project's interaction model
 * @param entities - resolved entity slots to attach to the intent (defaults to none)
 * @param query - raw user utterance associated with this request (defaults to empty)
 * @param confidence - NLU confidence score for the match, if any
 * @returns the context once the runtime next requests user input
 */
async sendIntent(
  name: string,
  entities: {
    name: string;
    value: string;
    query?: string;
  }[] = [],
  query = '',
  confidence?: number
): Promise<Context<V>> {
  const payload = { intent: { name }, entities, query, confidence };
  return this.sendRequest({ type: RequestType.INTENT, payload });
}

async sendRequest(request: GeneralRequest) {
if (this.context.isEnding()) {
throw new VFClientError('RuntimeClient.sendText() was called but the conversation has ended');
Expand Down
19 changes: 18 additions & 1 deletion tests/lib/Context/fixtures.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import { RequestType } from '@voiceflow/general-types';
import { IntentRequest, RequestType } from '@voiceflow/general-types';
import { State } from '@voiceflow/runtime';
import _ from 'lodash';

Expand Down Expand Up @@ -67,6 +67,12 @@ export const START_RESPONSE_BODY_WITH_NO_CHOICES = {
};

// Sample user utterance, reused as the `query` field of the intent fixture below.
export const USER_RESPONSE = 'This is what the user says in response to the voice assistant';
// Fixture payload for an INTENT request: a named intent, one resolved entity,
// the raw query string, and a mock NLU confidence score.
export const INTENT_RESPONSE: IntentRequest['payload'] = {
  intent: { name: 'anyIntent' },
  entities: [{ name: 'foo', value: 'bar' }],
  query: USER_RESPONSE,
  confidence: 1,
};

export const SEND_TEXT_REQUEST_BODY: RequestContext = {
state: VF_APP_NEXT_STATE_1,
Expand All @@ -79,6 +85,17 @@ export const SEND_TEXT_REQUEST_BODY: RequestContext = {
},
};

// Expected request body produced by RuntimeClient.sendIntent(): wraps
// INTENT_RESPONSE in an INTENT-typed request, with TTS disabled in config.
export const SEND_INTENT_REQUEST_BODY: RequestContext = {
  state: VF_APP_NEXT_STATE_1,
  request: {
    type: RequestType.INTENT,
    payload: INTENT_RESPONSE,
  },
  config: {
    tts: false,
  },
};

export const SEND_TEXT_REQUEST_BODY_TTS_ON: RequestContext = {
state: VF_APP_NEXT_STATE_1,
request: {
Expand Down
109 changes: 71 additions & 38 deletions tests/lib/RuntimeClient/index.unit.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,16 @@ import _ from 'lodash';
import sinon from 'sinon';

import RuntimeClient from '@/lib/RuntimeClient';
import { DataConfig, TraceType, TRACE_EVENT } from '@/lib/types';
import { DataConfig, TRACE_EVENT, TraceType } from '@/lib/types';
import { makeTraceProcessor } from '@/lib/Utils/makeTraceProcessor';

import {
CHOICE_TRACE,
CHOICES_1,
CHOICES_2,
CHOICES_3,
INTENT_RESPONSE,
SEND_INTENT_REQUEST_BODY,
SEND_TEXT_REQUEST_BODY,
SEND_TEXT_REQUEST_BODY_TTS_ON,
SEND_TEXT_RESPONSE_BODY,
Expand All @@ -23,7 +26,6 @@ import {
VF_APP_INITIAL_STATE,
} from '../Context/fixtures';
import { AUDIO_TRACE, BLOCK_TRACE, DEBUG_TRACE, END_TRACE, FLOW_TRACE, SPEAK_TRACE } from '../fixtures';
import { makeTraceProcessor } from '@/lib/Utils/makeTraceProcessor';

chai.use(chaiAsPromise);

Expand Down Expand Up @@ -107,6 +109,48 @@ describe('RuntimeClient', () => {
expect(data.toJSON()).to.eql(SEND_TEXT_RESPONSE_BODY);
});

it('sendIntent', async () => {
  const { agent, client } = createRuntimeClient();

  // A conversation session must be in progress before sendIntent is valid.
  client.interact.resolves(START_RESPONSE_BODY);
  await agent.start();

  client.interact.resolves(SEND_TEXT_RESPONSE_BODY);

  const { intent, entities, query, confidence } = INTENT_RESPONSE;
  const context = await agent.sendIntent(intent.name, entities, query, confidence);

  // Second interact call carries the fully-populated intent request body.
  expect(client.interact.callCount).to.eql(2);
  expect(client.interact.args[1]).to.eql([SEND_INTENT_REQUEST_BODY]);

  expect(context.toJSON()).to.eql(SEND_TEXT_RESPONSE_BODY);
});

it('sendIntent, empty', async () => {
  const { agent, client } = createRuntimeClient();

  client.interact.resolves(START_RESPONSE_BODY);
  await agent.start();

  client.interact.resolves(SEND_TEXT_RESPONSE_BODY);

  // Only the intent name is supplied — every other argument falls back to its default.
  const context = await agent.sendIntent(INTENT_RESPONSE.intent.name);

  const expectedRequest = {
    ...SEND_INTENT_REQUEST_BODY,
    request: {
      ...SEND_INTENT_REQUEST_BODY.request,
      payload: { intent: { name: INTENT_RESPONSE.intent.name }, entities: [], query: '', confidence: undefined },
    },
  };

  expect(client.interact.callCount).to.eql(2);
  expect(client.interact.args[1]).to.eql([expectedRequest]);

  expect(context.toJSON()).to.eql(SEND_TEXT_RESPONSE_BODY);
});

it('sendText, empty', async () => {
const { agent, client } = createRuntimeClient();

Expand Down Expand Up @@ -269,10 +313,10 @@ describe('RuntimeClient', () => {
describe('events', () => {
it('on', async () => {
const { agent, client } = createRuntimeClient();

const result1: any[] = [];
const result2: any[] = [];

agent.on(TraceType.SPEAK, (trace, context) => {
result1.push(trace, context);
});
Expand All @@ -284,16 +328,20 @@ describe('RuntimeClient', () => {

const context = await agent.start();

expect(result1).to.eql([
SPEAK_TRACE, context,
]);
expect(result1).to.eql([SPEAK_TRACE, context]);
expect(result2).to.eql([
SPEAK_TRACE, context,
BLOCK_TRACE, context,
FLOW_TRACE, context,
AUDIO_TRACE, context,
DEBUG_TRACE, context,
CHOICE_TRACE, context
SPEAK_TRACE,
context,
BLOCK_TRACE,
context,
FLOW_TRACE,
context,
AUDIO_TRACE,
context,
DEBUG_TRACE,
context,
CHOICE_TRACE,
context,
]);
});

Expand All @@ -304,7 +352,7 @@ describe('RuntimeClient', () => {

const callback = () => {
agent.on(BAD_TRACE_TYPE, () => {});
}
};

expect(callback).to.throw();
});
Expand All @@ -314,11 +362,11 @@ describe('RuntimeClient', () => {

const results: any = {};
Object.keys(TraceType)
.map(trace => trace.toLowerCase())
.map((trace) => trace.toLowerCase())
.forEach((trace) => {
results[trace] = [];
});

const insertToResults = (trace: any, context: any) => {
results[trace.type].push(trace, context);
};
Expand All @@ -340,36 +388,21 @@ describe('RuntimeClient', () => {

const context2 = await agent.sendText('some nonsense');

expect(results[TraceType.SPEAK]).to.eql([
SPEAK_TRACE, context1,
SPEAK_TRACE, context2
]);
expect(results[TraceType.SPEAK]).to.eql([SPEAK_TRACE, context1, SPEAK_TRACE, context2]);

expect(results[TraceType.VISUAL]).to.eql([]);

expect(results[TraceType.FLOW]).to.eql([
FLOW_TRACE, context1
]);
expect(results[TraceType.FLOW]).to.eql([FLOW_TRACE, context1]);

expect(results[TraceType.END]).to.eql([
END_TRACE, context2
]);
expect(results[TraceType.END]).to.eql([END_TRACE, context2]);

expect(results[TraceType.DEBUG]).to.eql([
DEBUG_TRACE, context1
]);
expect(results[TraceType.DEBUG]).to.eql([DEBUG_TRACE, context1]);

expect(results[TraceType.CHOICE]).to.eql([
CHOICE_TRACE, context1
]);
expect(results[TraceType.CHOICE]).to.eql([CHOICE_TRACE, context1]);

expect(results[TraceType.BLOCK]).to.eql([
BLOCK_TRACE, context1
]);
expect(results[TraceType.BLOCK]).to.eql([BLOCK_TRACE, context1]);

expect(results[TraceType.AUDIO]).to.eql([
AUDIO_TRACE, context1
]);
expect(results[TraceType.AUDIO]).to.eql([AUDIO_TRACE, context1]);
});
});
});

0 comments on commit cb4bdbb

Please sign in to comment.