Facial Expressions
This section provides comprehensive information on integrating and handling facial expressions and lipsync within your web applications using the convai-web-sdk.
Initialization
// Create the Convai client with facial-expression streaming enabled.
// Stored on a ref (convaiClient.current) so the instance survives re-renders.
convaiClient.current = new ConvaiClient({
apiKey: '<apiKey>', // your Convai API key
characterId: '<characterId>', // ID of the character to talk to
enableAudio: true, // stream audio responses
enableFacialData: true, // stream viseme/facial data alongside the audio
faceModel: 3, // OVR lipsync
});
Receiving Viseme Data
// State mirrors the accumulated viseme frames so components re-render as new
// facial data arrives; the ref holds the authoritative, mutable frame list.
const [facialData, setFacialData] = useState([]);
const facialRef = useRef([]);

convaiClient.current.setResponseCallback((response) => {
  if (response.hasAudioResponse()) {
    const audioResponse = response?.getAudioResponse();
    if (audioResponse?.getVisemesData()?.array[0]) {
      // Viseme data: one frame of lipsync weights for the OVR face model.
      const faceData = audioResponse.getVisemesData().array[0];
      // faceData[0] is the "sil" (silence) value. It is -2 when a new chunk
      // of audio is received — a reset marker, not a real frame — so skip it.
      if (faceData[0] !== -2) {
        facialRef.current.push(faceData);
        // Pass a NEW array reference: React bails out of a state update when
        // it receives the same reference it already holds, so pushing into
        // facialRef.current and setting it directly would not re-render.
        setFacialData([...facialRef.current]);
      }
    }
  }
}); // closes the setResponseCallback call (missing in the original snippet)