mt_inference.py
MT Decoding
```
usage: mt_inference.py [-h] [--config CONFIG] [--log_level {CRITICAL,ERROR,WARNING,INFO,DEBUG,NOTSET}]
                       --output_dir OUTPUT_DIR [--ngpu NGPU] [--seed SEED] [--dtype {float16,float32,float64}]
                       [--num_workers NUM_WORKERS] --data_path_and_name_and_type DATA_PATH_AND_NAME_AND_TYPE
                       [--key_file KEY_FILE] [--allow_variable_data_keys ALLOW_VARIABLE_DATA_KEYS]
                       [--mt_train_config MT_TRAIN_CONFIG] [--mt_model_file MT_MODEL_FILE]
                       [--lm_train_config LM_TRAIN_CONFIG] [--lm_file LM_FILE]
                       [--word_lm_train_config WORD_LM_TRAIN_CONFIG] [--word_lm_file WORD_LM_FILE]
                       [--ngram_file NGRAM_FILE] [--model_tag MODEL_TAG] [--batch_size BATCH_SIZE]
                       [--nbest NBEST] [--beam_size BEAM_SIZE] [--penalty PENALTY] [--maxlenratio MAXLENRATIO]
                       [--minlenratio MINLENRATIO] [--ctc_weight CTC_WEIGHT] [--lm_weight LM_WEIGHT]
                       [--ngram_weight NGRAM_WEIGHT] [--token_type {char,bpe,None}] [--bpemodel BPEMODEL]
                       [--normalize_length NORMALIZE_LENGTH]
```
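For reference, the same decoding run can also be launched from Python by passing an argument list to the module's entry point. The sketch below assumes an ESPnet2 installation; the paths, the output directory, and the `src_text` data name are illustrative placeholders rather than values prescribed by this page.

```python
# A minimal sketch of invoking MT decoding from Python instead of the shell.
# Assumption: ESPnet2 is installed. All paths and the "src_text" data name are
# placeholders that follow a typical MT recipe layout -- adjust to your setup.
from espnet2.bin.mt_inference import main

main(
    cmd=[
        # Required arguments (no brackets in the usage string above).
        "--output_dir", "exp/mt_decode",
        "--data_path_and_name_and_type", "dump/test/text,src_text,text",
        # Trained model: a local config/checkpoint pair (placeholder paths).
        "--mt_train_config", "exp/mt_train/config.yaml",
        "--mt_model_file", "exp/mt_train/valid.acc.ave.pth",
        # Decoding options.
        "--beam_size", "10",
        "--nbest", "1",
    ]
)
```

Alternatively, `--model_tag` can stand in for the local `--mt_train_config`/`--mt_model_file` pair when a pretrained model from the ESPnet model zoo is used.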