Implementation of E2-TTS, Embarrassingly Easy Fully Non-Autoregressive Zero-Shot TTS, in PyTorch
- Thanks to Manmay for contributing working end-to-end training code!
$ pip install e2-tts-pytorch
import torch
from e2_tts_pytorch import (
    E2TTS,
    DurationPredictor
)

# first, train a duration predictor on (mel spectrogram, text) pairs

predictor = DurationPredictor(
    transformer = dict(
        dim = 512,
        depth = 8,
    )
)

# dummy batch: 2 mel spectrograms of 1024 frames x 512 bins, with their transcripts

spectrograms = torch.randn(2, 1024, 512)
transcripts = ['Hello', 'Goodbye']

predictor_loss = predictor(spectrograms, text = transcripts)
predictor_loss.backward()

# then train the E2-TTS model itself, handing it the duration predictor

tts = E2TTS(
    duration_predictor = predictor,
    transformer = dict(
        dim = 512,
        depth = 8,
        skip_connect_type = 'concat'
    ),
)

tts_loss = tts(spectrograms, text = transcripts)
tts_loss.backward()

# sample speech, conditioning on a short mel prompt (first 5 frames) plus text

generated = tts.sample(spectrograms[:, :5], text = transcripts)
@inproceedings{Eskimez2024E2TE,
title = {E2 TTS: Embarrassingly Easy Fully Non-Autoregressive Zero-Shot TTS},
author = {Sefik Emre Eskimez and Xiaofei Wang and Manthan Thakker and Canrun Li and Chung-Hsien Tsai and Zhen Xiao and Hemin Yang and Zirun Zhu and Min Tang and Xu Tan and Yanqing Liu and Sheng Zhao and Naoyuki Kanda},
year = {2024},
url = {https://api.semanticscholar.org/CorpusID:270738197}
}
@inproceedings{Li2024ImmiscibleDA,
title = {Immiscible Diffusion: Accelerating Diffusion Training with Noise Assignment},
author = {Yiheng Li and Heyang Jiang and Akio Kodaira and Masayoshi Tomizuka and Kurt Keutzer and Chenfeng Xu},
year = {2024},
url = {https://api.semanticscholar.org/CorpusID:270562607}
}