aws-amplify#Predictions JavaScript Examples
The following examples show how to use the Predictions category from aws-amplify. Both are taken from the aws-amplify-quick-notes project.
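Both examples assume Predictions has already been configured at application startup. A minimal setup sketch based on the Amplify documentation (the aws-exports path and the exact import locations are assumptions about the host project):

import Amplify from "aws-amplify";
import { Predictions, AmazonAIPredictionsProvider } from "@aws-amplify/predictions";
import awsconfig from "./aws-exports";

Amplify.configure(awsconfig);
// Register the AWS AI/ML provider that backs Predictions.convert
Amplify.addPluggable(new AmazonAIPredictionsProvider());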
Example #1
Source File: Note.js from aws-amplify-quick-notes (MIT No Attribution)
// Imports reconstructed for context; Note, Info, Title, Text, Divider,
// NoteActions, Icon, and RecordingEditor are assumed to be local components
// of the aws-amplify-quick-notes project.
import React, { useState } from "react";
import { Predictions } from "aws-amplify";
import { FaPlay, FaRegEdit, FaRegTrashAlt } from "react-icons/fa";

const NotesComponent = props => {
  const [showEditor, setShowEditor] = useState(false);

  // Convert the note's text to speech, then decode and play the returned
  // audio stream with the Web Audio API.
  const playAudio = async () => {
    const result = await Predictions.convert({
      textToSpeech: {
        source: {
          text: props.text
        }
      }
    });
    const audioCtx = new AudioContext();
    const source = audioCtx.createBufferSource();
    audioCtx.decodeAudioData(
      result.audioStream,
      buffer => {
        source.buffer = buffer;
        source.connect(audioCtx.destination);
        source.start(0);
      },
      error => console.log(error)
    );
  };

  return (
    <Note>
      <Info>
        <Title>{props.title}</Title>
        <Text>{props.text}</Text>
      </Info>
      <Divider />
      <NoteActions>
        <Icon onClick={() => playAudio()}>
          <FaPlay />
        </Icon>
        <Icon onClick={() => setShowEditor(true)}>
          <FaRegEdit />
        </Icon>
        <Icon>
          <FaRegTrashAlt onClick={props.onDelete} />
        </Icon>
      </NoteActions>
      {showEditor && (
        <RecordingEditor
          title={props.title}
          text={props.text}
          onDismiss={() => {
            setShowEditor(false);
          }}
          onSave={props.onSaveChanges}
        />
      )}
    </Note>
  );
};
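The textToSpeech conversion also accepts a voiceId to select a specific Amazon Polly voice; a sketch based on the Amplify documentation (the "Amy" voice is just an illustration):

// Sketch: pick an explicit Polly voice instead of the project default
const result = await Predictions.convert({
  textToSpeech: {
    source: { text: props.text },
    voiceId: "Amy"
  }
});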
Example #2
Source File: Record.js from aws-amplify-quick-notes (MIT No Attribution)
// Imports reconstructed for context; Container, RecordingEditor, the pulse
// keyframes animation, and the createNote GraphQL mutation are assumed to be
// local to the aws-amplify-quick-notes project. "mic" is microphone-stream,
// and the css prop relies on the Emotion jsx pragma.
/** @jsx jsx */
import { useState } from "react";
import { jsx, css } from "@emotion/core";
import { API, graphqlOperation, Predictions } from "aws-amplify";
import mic from "microphone-stream";
import {
  FaMicrophone,
  FaMicrophoneAlt,
  FaMicrophoneAltSlash
} from "react-icons/fa";

const RecordComponent = props => {
  const [isRecording, setIsRecording] = useState(false);
  const [showRecordingEditor, setShowRecordingEditor] = useState(false);
  const [recordingText, setRecordingText] = useState("");
  const [isConverting, setIsConverting] = useState(false);
  const [micStream, setMicStream] = useState();

  // Closure that accumulates raw PCM samples while the microphone is open.
  const [audioBuffer] = useState(
    (function() {
      let buffer = [];
      function add(raw) {
        buffer = buffer.concat(...raw);
        return buffer;
      }
      function newBuffer() {
        console.log("resetting buffer");
        buffer = [];
      }
      return {
        reset: function() {
          newBuffer();
        },
        addData: function(raw) {
          return add(raw);
        },
        getData: function() {
          return buffer;
        }
      };
    })()
  );

  // Request microphone access and collect raw audio chunks into the buffer.
  const startRecording = async () => {
    const stream = await window.navigator.mediaDevices.getUserMedia({
      video: false,
      audio: true
    });
    const startMic = new mic();
    startMic.setStream(stream);
    startMic.on("data", chunk => {
      const raw = mic.toRaw(chunk);
      if (raw == null) {
        return;
      }
      audioBuffer.addData(raw);
    });
    setMicStream(startMic);
    setIsRecording(true);
  };

  // Stop recording and send the collected audio to Predictions.convert for
  // transcription, then show the resulting text in the editor.
  const stopRecording = async () => {
    micStream.stop();
    setIsRecording(false);
    setIsConverting(true);
    const buffer = audioBuffer.getData();
    const result = await Predictions.convert({
      transcription: {
        source: {
          bytes: buffer
        }
      }
    });
    setMicStream(null);
    audioBuffer.reset();
    setRecordingText(result.transcription.fullText);
    setIsConverting(false);
    setShowRecordingEditor(true);
  };

  return (
    <Container>
      <div
        css={css`
          position: relative;
          justify-content: center;
          align-items: center;
          width: 120px;
          height: 120px;
        `}
      >
        {/* Pulsing circle shown while recording or converting */}
        <div
          css={[
            css`
              width: 100%;
              height: 100%;
              top: 0;
              left: 0;
              position: absolute;
              border-radius: 50%;
              background-color: #74b49b;
            `,
            isRecording || isConverting
              ? css`
                  animation: ${pulse} 1.5s ease infinite;
                `
              : {}
          ]}
        />
        {/* Microphone button: click toggles recording on and off */}
        <div
          css={css`
            width: 100%;
            height: 100%;
            top: 0;
            left: 0;
            position: absolute;
            border-radius: 50%;
            background-color: #74b49b;
            display: flex;
            cursor: pointer;
          `}
          onClick={() => {
            if (!isRecording) {
              startRecording();
            } else {
              stopRecording();
            }
          }}
        >
          {isConverting ? (
            <FaMicrophoneAltSlash size={50} style={{ margin: "auto" }} />
          ) : isRecording ? (
            <FaMicrophone size={50} style={{ margin: "auto" }} />
          ) : (
            <FaMicrophoneAlt size={50} style={{ margin: "auto" }} />
          )}
        </div>
      </div>
      {showRecordingEditor && (
        <RecordingEditor
          text={recordingText}
          onDismiss={() => {
            setShowRecordingEditor(false);
          }}
          onSave={async data => {
            await API.graphql(graphqlOperation(createNote, { input: data }));
            props.setTabIndex(0);
          }}
        />
      )}
    </Container>
  );
};
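The transcription conversion can also take an explicit language code when the audio is not in the project's default language; a sketch based on the Amplify documentation (the option name may vary by Amplify version):

// Sketch: transcribe with an explicit source language
const result = await Predictions.convert({
  transcription: {
    source: { bytes: buffer },
    language: "en-US"
  }
});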