Skip to content
This repository has been archived by the owner on Mar 1, 2023. It is now read-only.

Added an audio recorder #56

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions lib/Icons.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -21,3 +21,4 @@ export { default as Twitter } from '@material-ui/icons/Twitter';
export { default as ImportContactsOutlined } from '@material-ui/icons/ImportContactsOutlined';
export { default as TimerOutlined } from '@material-ui/icons/TimerOutlined';
export { default as TabOutlined } from '@material-ui/icons/TabOutlined';
export { default as MicOutlined } from '@material-ui/icons/MicOutlined';
213 changes: 213 additions & 0 deletions lib/machines/audio-recorder.machine.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,213 @@
import {
assign,
createMachine,
DoneInvokeEvent,
forwardTo,
Sender,
} from 'xstate';

/**
 * Context held by the audio recorder machine.
 */
export interface AudioRecorderMachineContext {
  // Microphone MediaStream from getUserMedia; only present after the
  // requestAudioOptions service has succeeded.
  stream?: MediaStream;
  // Audio data chunks emitted by the MediaRecorder while recording.
  mediaChunks: Blob[];
}

/**
 * Events accepted by the audio recorder machine.
 * AUDIO_CHUNK_RECEIVED is raised internally by the recordFromStream
 * service; the rest are user-driven commands.
 */
export type AudioRecorderMachineEvent =
  | {
      type: 'RETRY';
    }
  | {
      type: 'RECORD';
    }
  | {
      type: 'AUDIO_CHUNK_RECEIVED';
      blob: Blob;
    }
  | {
      type: 'PAUSE';
    }
  | {
      type: 'RESUME';
    }
  | {
      type: 'STOP';
    }
  | {
      type: 'DOWNLOAD';
    };

const audioRecorderMachine = createMachine<
AudioRecorderMachineContext,
AudioRecorderMachineEvent
>(
{
id: 'audioRecorder',
initial: 'idle',
context: {
mediaChunks: [],
},
exit: ['removeMediaStream'],
states: {
idle: {
on: {
RECORD: {
target: 'requestingAudioOptions',
},
},
},
requestingAudioOptions: {
invoke: {
src: 'requestAudioOptions',
onError: {
target: 'couldNotRetrieveAudioOptions',
},
onDone: {
target: 'recording',
actions: 'assignStreamToContext',
},
},
},
recordingFailed: {
on: {
RETRY: 'recording',
},
},
recording: {
on: {
AUDIO_CHUNK_RECEIVED: {
actions: 'appendBlob',
},
STOP: {
target: 'complete',
},
},
invoke: {
id: 'recording',
src: 'recordFromStream',
onError: {
target: 'recordingFailed',
actions: (context, error) => {
console.error(error);
},
},
},
initial: 'running',
states: {
running: {
on: {
PAUSE: {
target: 'paused',
actions: forwardTo('recording'),
},
},
},
paused: {
on: {
RESUME: {
target: 'running',
actions: forwardTo('recording'),
},
},
},
},
},
complete: {
on: {
RETRY: {
target: 'recording',
actions: 'clearBlobData',
},
DOWNLOAD: {
actions: 'downloadBlob',
},
},
},
couldNotRetrieveAudioOptions: {
on: {
RETRY: 'requestingAudioOptions',
},
},
},
},
{
actions: {
downloadBlob: (context) => {
const blob = new Blob(context.mediaChunks, {
type: 'audio/ogg; codecs=opus',
});
const url = URL.createObjectURL(blob);
const downloadLink = document.createElement('a');

downloadLink.href = url;
downloadLink.download = `file.ogg`;
document.body.appendChild(downloadLink); // Required for FF
downloadLink.click();
},
removeMediaStream: (context) => {
if (context.stream) {
context.stream.getTracks().forEach((track) => {
track.stop();
});
}
},
assignStreamToContext: assign((context, event) => {
return {
stream: (event as DoneInvokeEvent<RequestAudioOptionsOutput>).data
.stream,
};
}),
clearBlobData: assign((context) => {
return {
mediaChunks: [],
};
}),
appendBlob: assign((context, event) => {
if (event.type !== 'AUDIO_CHUNK_RECEIVED') return {};
return {
mediaChunks: [...context.mediaChunks, event.blob],
};
}),
},
services: {
recordFromStream: (context) => (send, onReceive) => {
const mediaRecorder = new MediaRecorder(context.stream);

mediaRecorder.ondataavailable = (e) => {
send({
type: 'AUDIO_CHUNK_RECEIVED',
blob: e.data,
});
};
mediaRecorder.start(200);

onReceive((event) => {
if (event.type === 'PAUSE') {
mediaRecorder.pause();
} else if (event.type === 'RESUME') {
mediaRecorder.resume();
}
});

return () => {
if (mediaRecorder.state !== 'inactive') {
mediaRecorder.stop();
}
};
},
requestAudioOptions: async () => {
const stream = await navigator.mediaDevices.getUserMedia({
audio: true,
});

return {
stream,
};
},
},
},
);

export default audioRecorderMachine;

/**
 * Resolved value of the requestAudioOptions service, delivered to the
 * machine inside a DoneInvokeEvent.
 */
export interface RequestAudioOptionsOutput {
  // The microphone stream granted by getUserMedia.
  stream: MediaStream;
}
19 changes: 19 additions & 0 deletions lib/machines/audio-recorder.mdx
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Audio Recorder

This machine handles audio recording, including pausing/resuming, and downloading. It's fully implemented in the browser, so feel free to try recording yourself!

## Getting access to audio

In the browser, you need to request access to the user's audio in order to record it. When the user presses <Event>RECORD</Event>, we enter <State>requestingAudioOptions</State>, where the <Service>requestAudioOptions</Service> service is invoked.

If that request fails, via <Event>error.platform.requestAudioOptions</Event>, you'll be sent to the <State>couldNotRetrieveAudioOptions</State> state, where you can <Event>RETRY</Event> to try again.

## Recording

Once the audio access request succeeds, the machine saves a [MediaStream](https://developer.mozilla.org/en-US/docs/Web/API/MediaStream) to <Context stringify>stream</Context> and we enter <State>recording.running</State>. The <Service>recordFromStream</Service> service manages the [MediaRecorder](https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder) and fires back the data to context via <Event>AUDIO_CHUNK_RECEIVED</Event>.

Pressing <Event>PAUSE</Event> will pause the recording and head to <State>recording.paused</State>. Pressing <Event>RESUME</Event> starts the recording again.

## Stopping and downloading

Once you're done with the recording, you can <Event>STOP</Event> it. This heads to the <State>complete</State> state, where you can <Event>RETRY</Event> to go again or <Event>DOWNLOAD</Event> to grab the recording.
158 changes: 158 additions & 0 deletions lib/machines/audio-video-device-selection.machine.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,158 @@
import {
assign,
createMachine,
DoneInvokeEvent,
EventObject,
MachineOptions,
} from 'xstate';

/**
 * Context for the audio/video device selection machine: the full device
 * lists plus the currently selected device of each kind.
 */
export interface AudioVideoDeviceSelectionMachineContext {
  audioInputDevices: MediaDeviceInfo[];
  audioOutputDevices: MediaDeviceInfo[];
  videoInputDevices: MediaDeviceInfo[];
  // Selected devices are optional: undefined until defaults are assigned,
  // or when a device list is empty.
  selectedAudioInputDevice?: MediaDeviceInfo;
  selectedAudioOutputDevice?: MediaDeviceInfo;
  selectedVideoInputDevice?: MediaDeviceInfo;
  // NOTE(review): formValues is never read or written by this machine —
  // looks like leftover scaffolding; confirm with callers before removing.
  formValues: { username: string; password: string };
}

/**
 * Events accepted by the device selection machine. Each carries an index
 * into the corresponding device list in context.
 */
export type AudioVideoDeviceSelectionMachineEvent =
  | {
      type: 'CHOOSE_AUDIO_INPUT_DEVICE';
      index: number;
    }
  | {
      type: 'CHOOSE_AUDIO_OUTPUT_DEVICE';
      index: number;
    }
  | {
      type: 'CHOOSE_VIDEO_DEVICE';
      index: number;
    };

/** Done event produced when the requestAudioOptions service resolves. */
export type DevicesDoneEvent = DoneInvokeEvent<{
  devices: MediaDeviceInfo[];
}>;

/**
 * Identity helper used purely as a typing aid: it pins the generic
 * parameters of a partial MachineOptions object so callers get full type
 * checking on actions/services without manual annotation. Returns its
 * argument unchanged.
 */
function createAudioVideoDeviceSelectionMachineOptions<
  TContext,
  TEvent extends EventObject
>(
  options: Partial<MachineOptions<TContext, TEvent>>,
): Partial<MachineOptions<TContext, TEvent>> {
  return options;
}

/**
 * Machine that enumerates the user's media devices (microphones,
 * speakers, cameras), picks a default of each kind, and tracks the
 * user's current selection.
 */
const audioVideoDeviceSelectionMachine = createMachine<
  AudioVideoDeviceSelectionMachineContext,
  AudioVideoDeviceSelectionMachineEvent
>(
  {
    id: 'audioVideoDeviceSelection',
    initial: 'requesting devices',
    context: {
      audioInputDevices: [],
      audioOutputDevices: [],
      videoInputDevices: [],
      // NOTE(review): formValues is never touched by any state, action, or
      // service in this machine — likely leftover; confirm before removal.
      formValues: {
        username: '',
        password: '',
      },
    },
    states: {
      'requesting devices': {
        invoke: {
          // NOTE(review): despite the name, this service requests video
          // access as well — see the services section below.
          src: 'requestAudioOptions',
          onError: {
            // NOTE(review): the target state has no outgoing transitions,
            // so a failed device request is a dead end (no RETRY, unlike
            // the audio-recorder machine's error states).
            target: 'could not retrieve devices',
          },
          onDone: {
            // Store all devices first, then default each selection to the
            // first device of its kind.
            actions: [
              'assignDevicesToContext',
              'assignDefaultDevicesToContext',
            ],
            target: 'got devices',
          },
        },
      },
      'could not retrieve devices': {},
      'got devices': {
        on: {
          // Each CHOOSE_* event is guarded by a cond so an out-of-range
          // index is ignored instead of assigning undefined.
          CHOOSE_AUDIO_INPUT_DEVICE: {
            cond: (context, event) =>
              Boolean(context.audioInputDevices[event.index]),
            actions: assign((context, event) => {
              return {
                selectedAudioInputDevice:
                  context.audioInputDevices[event.index],
              };
            }),
          },
          CHOOSE_AUDIO_OUTPUT_DEVICE: {
            cond: (context, event) =>
              Boolean(context.audioOutputDevices[event.index]),
            actions: assign((context, event) => {
              return {
                selectedAudioOutputDevice:
                  context.audioOutputDevices[event.index],
              };
            }),
          },
          CHOOSE_VIDEO_DEVICE: {
            cond: (context, event) =>
              Boolean(context.videoInputDevices[event.index]),
            actions: assign((context, event) => {
              return {
                selectedVideoInputDevice:
                  context.videoInputDevices[event.index],
              };
            }),
          },
        },
      },
    },
  },
  {
    actions: {
      // Splits the enumerated devices into input/output/video lists,
      // dropping entries without a deviceId.
      assignDevicesToContext: assign((context, event: unknown) => {
        return {
          audioInputDevices: (event as DevicesDoneEvent).data.devices.filter(
            (device) => device.deviceId && device.kind === 'audioinput',
          ),
          audioOutputDevices: (event as DevicesDoneEvent).data.devices.filter(
            (device) => device.deviceId && device.kind === 'audiooutput',
          ),
          videoInputDevices: (event as DevicesDoneEvent).data.devices.filter(
            (device) => device.deviceId && device.kind === 'videoinput',
          ),
        };
      }),
      // Defaults each selection to the first device in its list; the
      // selected* fields stay undefined when a list is empty.
      assignDefaultDevicesToContext: assign((context) => {
        return {
          selectedAudioInputDevice: context.audioInputDevices[0],
          selectedAudioOutputDevice: context.audioOutputDevices[0],
          selectedVideoInputDevice: context.videoInputDevices[0],
        };
      }),
    },
    services: {
      // Requests both microphone and camera access, enumerates all media
      // devices, then immediately stops the temporary tracks so the
      // browser releases the hardware. (Requesting access first is
      // presumably needed so enumerateDevices returns labelled devices —
      // TODO confirm.)
      requestAudioOptions: async () => {
        const stream = await navigator.mediaDevices.getUserMedia({
          audio: true,
          video: true,
        });
        const devices = await navigator.mediaDevices.enumerateDevices();

        stream.getTracks().forEach((track) => {
          track.stop();
        });

        return {
          devices,
        };
      },
    },
  },
);

export default audioVideoDeviceSelectionMachine;
Loading