# Commit: tpu
use_amp: true  # whether to use automatic mixed precision (AMP) for training
# TPU and distributed training settings
use_tpu: true # whether to use TPU for training (set to true for TPU)
num_tpu_cores: 8  # number of TPU cores to use (full TPU v3-8)
gradient_accumulation_steps: 1 # number of gradient accumulation steps for distributed training
output_dir: trained_models/baseline_rnn # directory to save the trained model and logs