using System;
using System.Collections;
using System.Collections.Generic;
using System.IO;
using System.Threading.Tasks;
using UnityEngine;
using UnityEngine.Networking;
using OpenAI_API;
using OpenAI_API.Chat;
using OpenAI_API.Models;
using OpenAI_API.Audio;
using static OpenAI_API.Audio.TextToSpeechRequest;

public class OpenAI_TTS : MonoBehaviour
{
    private OpenAIAPI api;
    private AudioSource audioSource;

    public void Start()
    {
        // Read the API key from a user-level environment variable instead of hard-coding it in the project.
        api = new OpenAIAPI(Environment.GetEnvironmentVariable("OPENAI_API_KEY", EnvironmentVariableTarget.User));
        audioSource = gameObject.AddComponent<AudioSource>();
    }
    public async void text_to_speech(string str)
    {
        Debug.Log("text_to_speech");

        var request = new TextToSpeechRequest()
        {
            Input = str,
            ResponseFormat = ResponseFormats.MP3,
            Model = Model.TTS_HD,
            Voice = Voices.Nova,
            Speed = 0.9
        };

        // Save the synthesized speech to a temporary MP3 file.
        FileInfo file = await api.TextToSpeech.SaveSpeechToFileAsync(request, "speak_temp.mp3");
        Debug.Log(file.FullName);

        // Load the MP3 from disk with UnityWebRequest (the legacy WWW class is obsolete) and play it.
        using (var www = UnityWebRequestMultimedia.GetAudioClip("file://" + file.FullName, AudioType.MPEG))
        {
            var operation = www.SendWebRequest();
            while (!operation.isDone)
                await Task.Yield();

            if (www.result != UnityWebRequest.Result.Success)
            {
                Debug.LogError(www.error);
                return;
            }

            audioSource.clip = DownloadHandlerAudioClip.GetContent(www);
            audioSource.Play();
        }
    }
}
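
// Hypothetical usage sketch (not part of the original script): a caller component, here named
// OpenAI_TTS_Example, that assumes it sits on the same GameObject as OpenAI_TTS and speaks a
// test sentence when the Space key is pressed. In a real project this class would normally live
// in its own file named OpenAI_TTS_Example.cs so it can be attached in the Unity editor.
public class OpenAI_TTS_Example : MonoBehaviour
{
    private OpenAI_TTS tts;

    private void Start()
    {
        // Assumes OpenAI_TTS has already been added to the same GameObject in the scene.
        tts = GetComponent<OpenAI_TTS>();
    }

    private void Update()
    {
        if (Input.GetKeyDown(KeyCode.Space))
        {
            tts.text_to_speech("Hello from Unity text to speech!");
        }
    }
}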