#@markdown ## **3** Setting model parameters
#@markdown ---
#@markdown #### Your desired model name
model_filename = 'ninon-43' #@param {type: "string"}

#@markdown #### Upload your transcription / text file to tacotron/filelists, then right-click it and copy its path
Training_file = "filelists/list.txt" #@param {type: "string"}
hparams.training_files = Training_file
hparams.validation_files = Training_file

# hparams to Tune
#hparams.use_mmi=True,          # not used in this notebook
#hparams.use_gaf=True,          # not used in this notebook
#hparams.max_gaf=0.5,           # not used in this notebook
#hparams.drop_frame_rate = 0.2  # not used in this notebook
hparams.p_attention_dropout = 0.1
hparams.p_decoder_dropout = 0.1

# Quality of Life
generate_mels = True
hparams.show_alignments = True
alignment_graph_height = 600
alignment_graph_width = 1000

#@markdown #### Your batch size; lower it if you run out of GPU memory.
#@markdown ###### If you have a Tesla T4, a batch size of 14 is recommended.
hparams.batch_size = 43 #@param {type: "integer"}
hparams.load_mel_from_disk = True
hparams.ignore_layers = [] # Layers to reset (empty by default; unless you are training on a foreign language this can be ignored)

#@markdown #### Your learning rate options.
#@markdown ###### You can either auto-calculate a learning rate or set one manually. The default of the original notebook is 5e-5.
# Learning Rate
# https://www.desmos.com/calculator/ptgcz4vzsw / http://boards.4channel.org/mlp/thread/34778298#p34789030
hparams.decay_start = 15000 # wait until decay_start before decaying the learning rate
autocalculate_learning_rate = True #@param {type:"boolean"}
#@markdown ###### Manual learning rate override. Ignore if auto-calculating the learning rate.
if autocalculate_learning_rate:
    hparams.A_ = 0.001 * (hparams.batch_size / 256) ** 0.5 # Start/max learning rate
else:
    hparams.A_ = 1e-5 #@param {type:"number"}
hparams.B_ = 8000 # Decay rate
hparams.C_ = 0    # Shift the learning rate equation by this value
hparams.min_learning_rate = 1e-5 # Minimum learning rate

#@markdown #### Your total epochs to train for. Not recommended to change.
##@markdown #### Number of epochs before stopping; preferably very high so training does not stop early.
hparams.epochs = 900 #@param {type: "integer"}

torch.backends.cudnn.enabled = hparams.cudnn_enabled
torch.backends.cudnn.benchmark = hparams.cudnn_benchmark

#@markdown #### Where to save your model while training.
output_directory = '/content/drive/My Drive/colab/outdir' #@param {type: "string"}
log_directory = '/content/tacotron2/logs' # Location to save log files locally
log_directory2 = '/content/drive/My Drive/colab/logs' # Location to copy log files to (done at the end of each epoch to cut down on Drive I/O)
checkpoint_path = output_directory + '/' + model_filename
#@markdown ---
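
# --- Illustration only; not used by the training loop, which consumes A_, B_, C_,
# decay_start and min_learning_rate in a later cell. The helper below is a
# hypothetical reconstruction of the decay curve from the Desmos link above:
# flat at A_ + C_ until decay_start, then exponential decay with time constant B_,
# floored at min_learning_rate. The exact formula used downstream may differ.
# For the auto-calculated case with batch_size = 43, A_ = 0.001*sqrt(43/256) ~= 4.1e-4.
import math

def _sketch_learning_rate(iteration,
                          A_=hparams.A_, B_=hparams.B_, C_=hparams.C_,
                          decay_start=hparams.decay_start,
                          min_lr=hparams.min_learning_rate):
    if iteration < decay_start:
        lr = A_ + C_                                              # constant warm phase
    else:
        lr = A_ * math.exp(-(iteration - decay_start) / B_) + C_  # exponential decay
    return max(lr, min_lr)                                        # never drop below the floor

# e.g. preview the assumed schedule at a few iterations:
# for it in (0, 15000, 30000, 60000):
#     print(it, _sketch_learning_rate(it))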