So sieht das Skript aus:
Code: Select all
import torch
import TTS
from TTS.tts.configs.xtts_config import XttsConfig
from TTS.utils.manage import ModelManager
from TTS.utils.generic_utils import get_user_data_dir
from TTS.tts.models.xtts import Xtts
import os
import sounddevice as sd

print('Loading TTS config and model')
# Allowlist the XTTS config/arg classes so torch.load can deserialize the
# checkpoint (torch >= 2.6 defaults to weights_only=True and rejects
# arbitrary classes unless they are registered as safe globals).
torch.serialization.add_safe_globals([
    XttsConfig,
    TTS.tts.models.xtts.XttsAudioConfig,
    TTS.config.shared_configs.BaseDatasetConfig,
    TTS.tts.models.xtts.XttsArgs,
])

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Fetch the model into the local TTS cache (no-op if already downloaded),
# then load config + weights from that cache directory.
tts_model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
ModelManager().download_model(tts_model_name)
model_path = os.path.join(get_user_data_dir("tts"), tts_model_name.replace("/", "--"))
config = XttsConfig()
config.load_json(os.path.join(model_path, "config.json"))
model = Xtts.init_from_config(config)
model.load_checkpoint(config, model_path, use_deepspeed=False)

# Derive the speaker conditioning (voice cloning) from a reference clip,
# then move the model to the GPU if one is available.
voice_wav = './audio/output2.wav'
gpt_cond_latent, speaker_embedding = model.get_conditioning_latents(
    audio_path=voice_wav,
    gpt_cond_len=36,
    gpt_cond_chunk_len=12,
    load_sr=22050,
)
model.to(device, non_blocking=True)
print('TTS model ready')

# running
text = 'Servus, ich kann dich hören, ist bei dir alles in Ordnung? Kann ich irgendwie helfen? Frag mich wenn du was hast.'
# XTTS v2 produces 24 kHz mono audio; play each chunk as soon as the model
# emits it so speech starts before the full sentence is synthesized.
with sd.OutputStream(24000, channels=1) as stream:
    for chunk in model.inference_stream(
        text,
        language='de',
        gpt_cond_latent=gpt_cond_latent,
        speaker_embedding=speaker_embedding,
        stream_chunk_size=35,
    ):
        # sounddevice expects a NumPy array, not a torch tensor.
        chunk_np = chunk.cpu().numpy() if torch.is_tensor(chunk) else chunk
        stream.write(chunk_np)
Code: Select all
import time

# Benchmark: how long until the FIRST audio chunk is available
# (time-to-first-audio), and how expensive the device->host copy is.
start = time.time()
stream_iter = model.inference_stream(
    text,
    language='de',
    gpt_cond_latent=gpt_cond_latent,
    speaker_embedding=speaker_embedding,
)
first_chunk = next(stream_iter)
gen_time = time.time() - start

copy_start = time.time()
# Convert to NumPy only if the model handed back a torch tensor.
chunk_np = first_chunk.cpu().numpy() if torch.is_tensor(first_chunk) else first_chunk
copy_time = time.time() - copy_start

print("gen_time:", gen_time, "copy_time:", copy_time, "samples:", chunk_np.size)
Code: Select all
gen_time: 0.8997743129730225 copy_time: 0.0 samples: 21248
Mobile version