diff --git a/RegDGCNN_SurfaceFields/.gitignore b/RegDGCNN_SurfaceFields/.gitignore new file mode 100644 index 0000000..cd07313 --- /dev/null +++ b/RegDGCNN_SurfaceFields/.gitignore @@ -0,0 +1,7 @@ +My_python_job/Pressure_VTK/ +My_python_job/__pycache__/ +My_python_job/logs/ +My_python_job/Cache_data/ +__pycache__/ +logs/ + diff --git a/RegDGCNN_SurfaceFields/GPU_list.sh b/RegDGCNN_SurfaceFields/GPU_list.sh new file mode 100644 index 0000000..be8366b --- /dev/null +++ b/RegDGCNN_SurfaceFields/GPU_list.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +# Keywords to identify GPU queues +GPU_KEYWORDS="gpu|v100|a100|hgx" + +echo "--------------------------------------------" +echo "Checking GPU queues..." +echo "--------------------------------------------" + +# Get filtered queue list and save to a temp file +bqueues | grep -E "${GPU_KEYWORDS}" > gpu_queues.tmp + +# Initialize variables to find fastest +best_queue="" +min_pend=999999 + +# Read each line +while read -r line; do + # Parse columns (assuming default bqueues column layout) + queue_name=$(echo "$line" | awk '{print $1}') + pend=$(echo "$line" | awk '{print $8}') # 8th column is PEND + + echo "Queue: $queue_name - Pending jobs: $pend" + + # Check if this queue has fewer pending jobs + if [ "$pend" -lt "$min_pend" ]; then + min_pend=$pend + best_queue=$queue_name + fi +done < gpu_queues.tmp + +echo "--------------------------------------------" +echo "Recommended GPU queue (least pending): $best_queue (Pending: $min_pend)" +echo "--------------------------------------------" + +# Clean up +rm -f gpu_queues.tmp + diff --git a/RegDGCNN_SurfaceFields/Model_Evaluation.sh b/RegDGCNN_SurfaceFields/Model_Evaluation.sh new file mode 100644 index 0000000..f889059 --- /dev/null +++ b/RegDGCNN_SurfaceFields/Model_Evaluation.sh @@ -0,0 +1,10 @@ +python run_pipeline.py \ + --stages evaluate \ + --exp_name "DrivAerNet_Pressure" \ + --dataset_path "$HOME/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM" \ + 
--cache_dir "$HOME/ML_Turbulent/Data_Pressure_Field/Cache_data" \ + --subset_dir "$HOME/ML_Turbulent/DrivAerNet/train_val_test_splits" \ + --num_points 10000 \ + --num_eval_samples 5 \ + --gpus "0" + diff --git a/RegDGCNN_SurfaceFields/Model_Preprocess.sh b/RegDGCNN_SurfaceFields/Model_Preprocess.sh new file mode 100644 index 0000000..0e8157c --- /dev/null +++ b/RegDGCNN_SurfaceFields/Model_Preprocess.sh @@ -0,0 +1,7 @@ +python run_pipeline.py \ + --stages preprocess \ + --exp_name "DrivAerNet_Pressure" \ + --dataset_path "$HOME/ML_Turbulent/Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM" \ + --cache_dir "$HOME/ML_Turbulent/Pressure_Field/Cache_data" \ + --subset_dir "$HOME/ML_Turbulent/Pressure_Field/train_val_test_splits" \ + --num_points 10000 diff --git a/RegDGCNN_SurfaceFields/Model_Test.sh b/RegDGCNN_SurfaceFields/Model_Test.sh new file mode 100644 index 0000000..9231bbe --- /dev/null +++ b/RegDGCNN_SurfaceFields/Model_Test.sh @@ -0,0 +1,9 @@ +python test_single_vtk.py \ + --model_checkpoint "experiments/DrivAerNet_Pressure/best_model.pth" \ + --vtk_file "$HOME/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM/N_S_WWS_WM_001.vtk" \ + --output_dir "visualizations" \ + --num_points 10000 \ + --k 40 \ + --emb_dims 1024 \ + --dropout 0.4 + diff --git a/RegDGCNN_SurfaceFields/Model_Training.sh b/RegDGCNN_SurfaceFields/Model_Training.sh new file mode 100644 index 0000000..066430f --- /dev/null +++ b/RegDGCNN_SurfaceFields/Model_Training.sh @@ -0,0 +1,32 @@ +python run_pipeline.py \ + --stages train \ + --exp_name "DrivAerNet_Pressure" \ + --dataset_path "$HOME/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM" \ + --subset_dir "$HOME/ML_Turbulent/DrivAerNet/train_val_test_splits" \ + --cache_dir "$HOME/ML_Turbulent/Data_Pressure_Field/Cache_data" \ + --num_points 10000 \ + --num_workers 1 \ + --batch_size 6 \ + --epochs 150 \ + --gpus "0" + + +# === Variable === +# --batch_size +# It defines how many samples are 
processed at once per training step +# +# --epochs +# One epoch = one full pass through the training dataset +# +# Total number of training rounds over the whole dataset +# e.g. 10000 samples, batch_size = 100 +# 10 000 / 100 = 100 steps for one epoch +# --epochs 150 have 150 times loop + +# --dataset_path "$HOME/Data_Pressure/Pressure_VTK" \ +# --subset_dir "$HOME/ML_Turbulent/DrivAerNet/train_val_test_splits" \ +# --cache_dir "$HOME/Data_Pressure/Cache_data" \ + +# --dataset_path "$HOME/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM" \ +# --subset_dir "$HOME/ML_Turbulent/DrivAerNet/train_val_test_splits" \ +# --cache_dir "$HOME/ML_Turbulent/Data_Pressure_Field/Cache_data" \ diff --git a/RegDGCNN_SurfaceFields/My_python_job/Model_Script/GPU_list.sh b/RegDGCNN_SurfaceFields/My_python_job/Model_Script/GPU_list.sh new file mode 100644 index 0000000..be8366b --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/Model_Script/GPU_list.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +# Keywords to identify GPU queues +GPU_KEYWORDS="gpu|v100|a100|hgx" + +echo "--------------------------------------------" +echo "Checking GPU queues..." 
+echo "--------------------------------------------" + +# Get filtered queue list and save to a temp file +bqueues | grep -E "${GPU_KEYWORDS}" > gpu_queues.tmp + +# Initialize variables to find fastest +best_queue="" +min_pend=999999 + +# Read each line +while read -r line; do + # Parse columns (assuming default bqueues column layout) + queue_name=$(echo "$line" | awk '{print $1}') + pend=$(echo "$line" | awk '{print $8}') # 8th column is PEND + + echo "Queue: $queue_name - Pending jobs: $pend" + + # Check if this queue has fewer pending jobs + if [ "$pend" -lt "$min_pend" ]; then + min_pend=$pend + best_queue=$queue_name + fi +done < gpu_queues.tmp + +echo "--------------------------------------------" +echo "Recommended GPU queue (least pending): $best_queue (Pending: $min_pend)" +echo "--------------------------------------------" + +# Clean up +rm -f gpu_queues.tmp + diff --git a/RegDGCNN_SurfaceFields/My_python_job/Model_Script/Model_Evaluate.sh b/RegDGCNN_SurfaceFields/My_python_job/Model_Script/Model_Evaluate.sh new file mode 100644 index 0000000..6fbff3b --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/Model_Script/Model_Evaluate.sh @@ -0,0 +1,17 @@ +python ./run_pipeline.py \ + --stages evaluate \ + --exp_name "Train_Test" \ + --dataset_path "$HOME/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM" \ + --subset_dir "$HOME/ML_Turbulent/DrivAerNet/train_val_test_splits" \ + --cache_dir "$HOME/ML_Turbulent/Data_Pressure_Field/Cache_data" \ + --num_points 50000 \ + --num_eval_samples 5 \ + --gpus "0" + +# --dataset_path "$HOME/Data_Pressure/Pressure_VTK" \ +# --subset_dir "$HOME/ML_Turbulent/DrivAerNet/train_val_test_splits" \ +# --cache_dir "$HOME/Data_Pressure/Cache_data" \ + +# --dataset_path "$HOME/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM" \ +# --subset_dir "$HOME/ML_Turbulent/DrivAerNet/train_val_test_splits" \ +# --cache_dir "$HOME/ML_Turbulent/Data_Pressure_Field/Cache_data" \ diff --git 
a/RegDGCNN_SurfaceFields/My_python_job/Model_Script/Model_Preprocess.sh b/RegDGCNN_SurfaceFields/My_python_job/Model_Script/Model_Preprocess.sh new file mode 100644 index 0000000..8bcd6db --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/Model_Script/Model_Preprocess.sh @@ -0,0 +1,15 @@ +python run_pipeline.py \ + --stages preprocess \ + --exp_name "Train_Test" \ + --dataset_path "$HOME/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM" \ + --subset_dir "$HOME/ML_Turbulent/DrivAerNet/train_val_test_splits" \ + --cache_dir "$HOME/ML_Turbulent/Data_Pressure_Field/Cache_data" \ + --num_points 50000 + +# --dataset_path "$HOME/Data_Pressure/Pressure_VTK" \ +# --subset_dir "$HOME/ML_Turbulent/DrivAerNet/train_val_test_splits" \ +# --cache_dir "$HOME/Data_Pressure/Cache_data" \ + +# --dataset_path "$HOME/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM" \ +# --subset_dir "$HOME/ML_Turbulent/DrivAerNet/train_val_test_splits" \ +# --cache_dir "$HOME/ML_Turbulent/Data_Pressure_Field/Cache_data" \ diff --git a/RegDGCNN_SurfaceFields/My_python_job/Model_Script/Model_Training.sh b/RegDGCNN_SurfaceFields/My_python_job/Model_Script/Model_Training.sh new file mode 100644 index 0000000..8370ec5 --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/Model_Script/Model_Training.sh @@ -0,0 +1,22 @@ +python ./run_pipeline.py \ + --stages train \ + --exp_name "Train_Test" \ + --dataset_path "$HOME/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM" \ + --subset_dir "$HOME/ML_Turbulent/DrivAerNet/train_val_test_splits" \ + --cache_dir "$HOME/ML_Turbulent/Data_Pressure_Field/Cache_data" \ + --num_points 50000 \ + --num_workers 1 \ + --batch_size 6 \ + --epochs 60 \ + --test_only 0 \ + --gpus "0" + + + +# --dataset_path "$HOME/Data_Pressure/Pressure_VTK" \ +# --subset_dir "$HOME/ML_Turbulent/DrivAerNet/train_val_test_splits" \ +# --cache_dir "$HOME/Data_Pressure/Cache_data" \ + +# --dataset_path 
"$HOME/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM" \ +# --subset_dir "$HOME/ML_Turbulent/DrivAerNet/train_val_test_splits" \ +# --cache_dir "$HOME/ML_Turbulent/Data_Pressure_Field/Cache_data" \ diff --git a/RegDGCNN_SurfaceFields/My_python_job/Model_Script/Model_Visualize.sh b/RegDGCNN_SurfaceFields/My_python_job/Model_Script/Model_Visualize.sh new file mode 100644 index 0000000..542209c --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/Model_Script/Model_Visualize.sh @@ -0,0 +1 @@ +python visualize_plt.py diff --git a/RegDGCNN_SurfaceFields/My_python_job/Model_Script/tail.sh b/RegDGCNN_SurfaceFields/My_python_job/Model_Script/tail.sh new file mode 100644 index 0000000..66d22c8 --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/Model_Script/tail.sh @@ -0,0 +1,2 @@ +#!/bin/bash +tail -f ./logs/err_* diff --git a/RegDGCNN_SurfaceFields/My_python_job/Pressure_train.lsf b/RegDGCNN_SurfaceFields/My_python_job/Pressure_train.lsf new file mode 100644 index 0000000..ef92256 --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/Pressure_train.lsf @@ -0,0 +1,44 @@ +#!/bin/bash +#BSUB -J DrivAerNet_Pressure_GPU # Job name +#BSUB -q 2v100-32-e5 # Queue name (change if needed) +#BSUB -n 1 # Number of CPU cores +#BSUB -R "span[ptile=1]" +#BSUB -gpu "num=1" +#BSUB -oo logs/out_%J.log # Standard output (%J = job ID) +#BSUB -eo logs/err_%J.log # Standard error +#BSUB -env "all" # Export your current environment + +# ------------------------------- +# Load environment and run script +# ------------------------------- + +echo "Starting DrivAerNet training on GPU nodes..." +echo "Running on host: $(hostname)" +echo "Job ID: $LSB_JOBID" +echo "Requested GPUs: $LSB_GPU_REQ" + +# Activate d2l +source ~/lib/miniconda3/etc/profile.d/conda.sh +conda activate d2l + +# Optional: verify GPU is available +nvidia-smi + +# Remove old logs but keep the ones for this job +find ./logs/ -name '*.log' ! 
-name "*_${LSB_JOBID}*.log" -delete + +# Run your Preprocess script + sh ./Model_Script/Model_Preprocess.sh + +# Run your training script + sh ./Model_Script/Model_Training.sh + +# Run your Evaluation script + sh ./Model_Script/Model_Evaluate.sh + +# Run your Visualize script(only in the local computer) +# sh ./Model_Script/Model_Visualize.sh + + + + diff --git a/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_Colorama.py b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_Colorama.py new file mode 100644 index 0000000..8840519 --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_Colorama.py @@ -0,0 +1,30 @@ +from colorama import Fore, Style + + logging.info(f"{Fore.GREEN}all_metrics: {all_metrics}{Style.RESET_ALL}") + -> {Style.RESET_ALL} + -> Reset the default color for the subsequent info + +from colorama import Fore + +Fore.BLACK +Fore.RED +Fore.GREEN +Fore.YELLOW +Fore.BLUE +Fore.MAGENTA +Fore.CYAN +Fore.WHITE +Fore.RESET # Reset to default + +from colorama import Back + +Back.BLACK +Back.RED +Back.GREEN +Back.YELLOW +Back.BLUE +Back.MAGENTA +Back.CYAN +Back.WHITE +Back.RESET # Reset to default + diff --git a/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_HyperParameter.py b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_HyperParameter.py new file mode 100644 index 0000000..f77e76b --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_HyperParameter.py @@ -0,0 +1,15 @@ +# === Variable === +1. batch_size + -> It defines how many samples are processed at once per training step + -> e.g. batch_size = 2 + -> each batchcontains 2 samples + -> In train.py + -> the 2 samples is .vtk files + -> each sample have two variable, points_tensor and pressure_tensor + +2. epochs + Total number of training rounds over the whole dataset + e.g. 
10000 samples, batch_size = 100 + 10 000 / 100 = 100 batches for one epoch + epochs 150 have 150 times loop + diff --git a/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_data_loader.py b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_data_loader.py new file mode 100644 index 0000000..57c0822 --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_data_loader.py @@ -0,0 +1,248 @@ +from torch.utils.data import Dataset, Subset, DataLoader + +# ============ Class Usage ============ + +****class SurfacePressureDataset(Dataset): + -> Create a custom dataset by inheriting from torch.utils.data + +1.---- + def __init__(self, root_dir: str, num_points: int, preprocess=False, cache_dir=None): + -> The constructor; runs when an instance of the class is created +#! + self + -> refers to the current object of the class + +#! + self.root_dir = root_dir + -> Stores the input parameter "root_dir" inside the object + +#! + self.vtk_files = [os.path.join(root_dir, f) for f in os.listdir(root_dir) if f.endswith('.vtk')] + + os.path.join(root_dir, f) + -> Joins the folder path "root_dir" with the filename f + -> i.e. root_dir/f + + os.listdir(root_dir) + -> List all files and folder name in the "root_dir" directory + + f.endswith('.vtk') + -> Keeps those files end with .vtk + +2.---- + def __len__(self): +#! + len(self.vtk_files) + -> Number of .vtk files +Usage + print(len(dataset)) + -> This will call __len__(self) automatically + +3.---- + def _get_cache_path(self, vtk_file_path): +#! 
+ base_name = os.path.basename(vtk_file_path).replace('.vtk', '.npz') + -> just get a string + +4.----difference among the method style + + 1.---- + __init__, __getitem__, __len__ + -> double underscore methods + -> They allow your class to work with built-in python behavior + + 2.---- + _get_cache_path + -> one underscore + -> For internal use only, but they can be accessed from outside if needed + 3.---- + sample_point_cloud_with_pressure + -> Normal public method + +#!------------------------------------- + def _get_cache_path(self, vtk_file_path): + base_name = os.path.basename(vtk_file_path).replace('.vtk', '.npz') + -> os.path.basename() + -> Get the last part of path + +#!--------------------------------------------------------------- + def __getitem__(self, idx): + mesh = pv.read(vtk_file_path) + -> Use pyvista to read a VTK mesh file + +#!-------------- + point_cloud, pressures = self.sample_point_cloud_with_pressure(mesh, self.num_points) + +#!--------------------------------------------------------------- + def sample_point_cloud_with_pressure(self, mesh, n_points=5000): + indices = np.random.choice(mesh.n_points, n_points, replace=False) + -> Randomly select n_points in mesh.n_points + -> indices is a NumPy array + -> replace=False + -> No repeated number in indices + +#!-------------- + indices = np.arange(mesh.n_points) + -> Create a NumPy array of evenly spaced integers + -> Example: + indices = np.arange(5) + array([0, 1, 2, 3, 4]) + +#!-------------- + sampled_points = mesh.points[indices] + -> Select only the points in mesh.points corresponding to the indices in "indices" + -> Example: + mesh.points = + [[0.0, 0.0, 0.0], # point 0 + [1.0, 0.0, 0.0], # point 1 + [1.0, 1.0, 0.0], # point 2 + [0.0, 1.0, 0.0]] # point 3 + + indices = np.array([1, 3]) + + [[1.0, 0.0, 0.0], # point 1 + [0.0, 1.0, 0.0]] # point 3 + +#!-------------- + sampled_pressures = sampled_pressures.flatten() + -> Convert a Multi-D array into a "1D" array + -> Example: + 
sampled_pressures = + [[1.0], + [2.0], + [3.0]] + + [1.0, 2.0, 3.0] + +#!-------------- + return pv.PolyData(sampled_points), sampled_pressures + -> Make it a PyVisata object easying for postprocess + -> sampled_points.shape + -> (N,3) + +# ============ Function Usage ============ +1. def create_subset(dataset, ids_file): + -> dataset: the full SurfacePressureDataset. all .vtk files + -> ids_file: file path + -> "/home/heng-924/ML_Turbulent/DrivAerNet/train_val_test_splits/train_design_ids.txt" + -> train, val, test + + #! + try block + -> Lets you safely run code that might crash + -> handle errors gracefully + -> Usage + -> try: + # risky code + except SomeError: + # handle the error + + #! + with open(ids_file, 'r') as file: + -> open ids_file in read mode + -> with ... as file: + Open this file, and automatically close + + #! + subset_ids = [id_.strip() for id_ in file.readlines()] + -> return a clearn list just value + -> ['0001', '0002', '0003'] + -> file.readlines() + -> Append '\n' for each element + -> example + ['0001\n', '0002\n', '0003\n'] + -> id_.strip() + -> Remove whitespcae characters for both ends of a string + + #! + subset_files = [f for f in dataset.vtk_files if any(id_ in f for id_ in subset_ids)] + -> dataset.vte_files + -> Defined in Class PressurePrediction + -> It is a array ['001.vtk', '002.vtk', ...] + -> any(id_ in f for id_ in subset_ids) + -> any[True, False] = True + -> any[False, False] = False + -> for id_ in subset_ids + -> This is a loop + -> id_ is a array stores whole subset_ids string + -> id_ in f + -> This is a condition + -> id_ denotes the current string, just one string + -> f for f in dataset.vtk_files + -> the first f: The value you wanna put into the new list + -> the second f: The variable name you use to loop over the old list + -> example + -> dataset.vtk_files = ['car_0001.vtk', 'car_0002.vtk', 'car_0003.vtk'] + subset_ids = ['0001', '0003'] + -> subset_files = ['car_0001.vtk', 'car_0003.vtk'] + + #! 
+ return Subset(dataset, subset_indices) + -> from torch.utils.data import Dataset, Subset, DataLoader + -> Subset is an object of torch.utils.data + -> It maps your "subset index" to a "real index" in the full dataset + + + + + +2. def get_dataloaders(dataset_path: str, subset_dir: str, num_points: int, batch_size: int, + world_size: int, rank: int, cache_dir: str = None, num_workers: int = 4) -> tuple: + + #! + train_sampler = torch.utils.data.distributed.DistributedSampler( + train_dataset, num_replicas=world_size, rank=rank + ) + -> Split the dataset across multiple GPUs or processes + -> num_replicas = world_size + -> Total processes/GPUs will be used + + #! + train_dataloader = DataLoader( + train_dataset, batch_size=batch_size, sampler=train_sampler, + drop_last=True, num_workers=num_workers + ) + -> It turns a Dataset into mini-batches of data you can iterate over in a training loop. + -> Example + -> dataset = [data0, data1, data2, data3, ..., data9] + -> batch_size = 4 + -> for batch in loader: + print(batch) + -> [ data0, data1, data2, data3 ] + [ data4, data5, data6, data7 ] + + + -> drop_last=True + -> Drop the last incomplete batch if the dataset size is not divisible by the batech size + -> Example + -> Dataset has 103 samples + -> batch_size = 12 + -> the leftover of 7 is discarded + + -> train_dataloader = DataLoader( + train_dataset, batch_size=batch_size, sampler=train_sampler, + drop_last=True, num_workers=num_workers + ) + -> Whenever you ask me for batch, I will call train_dataset[i] + -> It is an object of type: torch.utils.DataLoader + -> It does not story the data directly. + -> It wraps a dataset and serves data in batches when you iterate + -> Usage + -> for ii, Batch in enumerate(train_dataloader): + logging.info(f"Batch: {ii}") + logging.info(f"Batch.points: {Batch.points}") + logging.info(f"Batch.Pressure: {Batch.pressure}") + -> Use enmuerate() to get index ii and the data in each batch + -> This will call dataset[ii] i.e. 
__getitem__ + -> train_dataloader.dataset.indices + -> List of selected sample indices + -> train_dataloader.dataset + -> Type is Subset + -> Subset is an object of torch.utils.data + -> train_dataloader.dataset.dataset + -> The full of class SurfacePressureDataset + -> dataset = train_dataloader.dataset.dataset + -> logging.info(f"Type of dataset: {type(dataset)}") + -> Checkout the methods and attributes in this object + -> logging.info(f"List all methods and attributs: {dir(dataset)}") + diff --git a/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_evaluate.py b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_evaluate.py new file mode 100644 index 0000000..caf5df1 --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_evaluate.py @@ -0,0 +1,137 @@ +""" + Usage of evaluate.py +""" + +#!--------------------------------------------------------------------------------------------- +def Initialize_model(args, device): +#! + state_dict = torch.load(args.model_checkpoint, map_location=device) + -> Pytorch function which Loads saved object + -> state_dict stores a model parameters i.e. weights and biases in a dictionary + -> Just directory, not the "model architecture" + -> This dictionary maps each layer name to its corresponding tensor + -> example: + { + "conv1.weight": tensor(...), + "conv1.bias": tensor(...), + ... + } + + -> args.model_checkpoint + -> The saved model + -> map_location=device + -> Tell PyTorch where to load the model parameters to + -> Especially when using different devices + +#! 
+ model.load_state_dict(state_dict) + -> .load_state_dict() copies the weights and biases from state_dict into your current model instance + -> model = RegDGCNN_pressure(args_dict).to(device) + -> This is just a Initialized model, Create the "model architecture" + -> Which starts with random weights by default + +#!--------------------------------------------------------------------------------------------- +def prepare_dataset(args): + sample_indices = list(range(len(dataset))) + -> len(dataset) + -> Return How many samples are in the dataset + -> range(n) + -> Generates a sequence of numbers from 0 to n-1 + -> list() + -> The range object is an iterator, not a list + -> list() converts it into an explicit Python list + -> [0, 1, 2, 3, ...] + +#!--------------------------------------------------------------------------------------------- +def evaluate_model(model, dataset, sample_indices, args): + device = next(model.parameters()).device + -> model.parameters() + -> Returns an iterator over all model parameter + -> next() + -> Get the first parameter tensor + -> iterator is not list, it can not access by model.parameters()[0] + +#!---------------- + with torch.no_grad(): + -> Do not need gradients in evaluate part + -> Just use in train part + +#!---------------- + batch_metrics = calculate_metrics(normalized_targets.cpu().numpy(), normalized_outputs.cpu().numpy()) + -> .cpu() + -> Copy the tensor from GPU memory to CPU memory + -> .numpy() + -> Converts the CPU tensor into a NumPy array + +#!---------------- + sample_name = os.path.basename(vtk_file).replace('.vtk', '') + -> .os.path.basename() + -> Get the last part of a file path + -> i.e. 
the file name only + -> example: + -> vtk_file: /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM/N_S_WWS_WM_292.vtk + -> N_S_WWS_WM_292.vtk + -> .replace(',vtk', '') + -> N_S_WWS_WM_292.vtk + -> N_S_WWS_WM_292 + +#!---------------- + true_pressure_np = targets.cpu().numpy().squeeze() + -> squeeze() + -> Remove all dimensions with size 1 + -> Example: + x1 = torch.zeros(1, 3, 1, 5) + print(x.shape) # (1, 3, 1, 5) + x2 = x1.squeeze() + print(x2.shape) # (3, 5) + +#!---------------- + for metric_name, value in all_metrics[0].items(): + -> all_metrics + -> This is a list stores all the validation file + -> all_metrics[0].items() + -> Get the first file data + -> It returns am iterable view of key-value pairs + -> example: + d = {'a': 1, 'b': 2} + print(d.items()) + ([('a', 1), ('b', 2)]) + +#!---------------- + agg_metrics[metric_name] = np.mean([m[metric_name] for m in all_metrics]) + -> m[metric_name] for m in all_metrics + -> example: + all_metrics = [ + {'MSE': 0.01, 'MAE': 0.02, 'RMSE': 0.1}, + {'MSE': 0.02, 'MAE': 0.03, 'RMSE': 0.12}, + {'MSE': 0.015, 'MAE': 0.025, 'RMSE': 0.11} + ] + -> [0.01, 0.02, 0.015] + +#!---------------- + np.savez(os.path.join(results_dir, 'aggregated_metrics.npz'), **agg_metrics) + -> **agg_metrics + -> Unpack dict keys as separate arrays + -> Example: + agg_metrics = { + 'MSE': np.array([0.015]), + 'MSE_std': np.array([0.004]), + 'MAE': np.array([0.02]), + 'MAE_std': np.array([0.003]), + } + +---------------------------------------+ + | aggregated_metrics.npz | + | | + | 'MSE' ---> [0.015] | + | 'MSE_std' ---> [0.004] | + | 'MAE' ---> [0.020] | + | 'MAE_std' ---> [0.003] | + +---------------------------------------+ + -> How to access after loading + data = np.load("aggregated_metrics.npz") + print(data['MSE']) # [0.015] + print(data['MAE_std']) # [0.003] + + + + diff --git a/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_model_pressure.py 
b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_model_pressure.py new file mode 100644 index 0000000..c193610 --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_model_pressure.py @@ -0,0 +1,434 @@ +import torch +import torch.nn as nn + +# ====== Machinery Learning Knowledhe ====== +1. hyperparameter + -> A hyperparameter is configuration value that you set before training a machine learning model + -> It is not learned from data + -> It controls how the model is trained or structured + e.g. + learning rate, batch_size, droupout + +2. Batch Normalization + -> BatchNorm helps deep learning models train faster and more stable + -> Normalizing the outputs of a layer (i.e. make the data mean=0, std=1) + -> Then leaning how to scale and shift them again + + #! + -> This helps reduce the risk of "vanishing gradients" + -> And makes lreaning less sensitive to initialization or learning rate + +3. Channel + -> A channel is a set of values at each point that descirbe different types of information + -> Think of it as a feature layer stacked on top of another + + #! Real-World Analogy + 1. Imagine a color image: + -> A color image has 3 channel: + -> Red, Green, and Blue + 2. At each pixel (x,y), have 3 values: + -> Red value, Green value, Blue value + 3. So an image os shape (3, H, W) has + -> 3 channels(RGC) + -> Height H (Number of pixel top to bottom) + -> Width W (Number of pixel left to right) + e.g. (3, 224, 224) + + #! Current case 3D Point Cloud + Maybe + start with 6 channels x, y, z corrdinates + and p_i, p_j, p_k + the relative neighbor + -> Input: shape = (B, 6, N) + -> Conv2d: shape = (B, 64, N, K) + -> B: batch size + -> N: number of points + -> K: neighbors + -> 64: number of channels(feature types learned by model) + +4. 
Fully Connected Layer + + self.linear1 = nn.Linear(1024, 512, bias=False) + self.bn4 = nn.BatchNorm1d(512) + -> This type layer connects every input neuron to every output neuron using a weight + -> The weight is PyTorch defined and User can modify + -> example + Input: [x1, x2, x3] + + Output: y1 = w11*x1 + w12*x2 + w13*x3 + b1 + y2 = w21*x1 + w22*x2 + w23*x3 + b2 + +5. forward() + In PyTorch, the forward() method defines how your model processes input data to make a prediction + -> example + -> output = model(input) + same as + -> output = model.forward(input) + -> forward() How data flows forward through the network + +6. nn.Linear() + -> fc = nn.Linear(Input_Channel, Out_Channel) + -> Input shape = [Batch_size, channel] + -> Output_shape = [Batch_size, channel] + +7. nn.Conv1d() + -> fc = nn.Conv1d(Input_Channel, Out_Channel) + -> Input shape = [Batch_size, channel, Length] + -> Output_shape = [Batch_size, channel, Length] + -> Physical Example + -> Channel = 3 + -> Length = 5, value for 5 timestep + -> channels = [ + [ 2, 4, 6, 8, 10 ], # temperature + [ 1, 3, 5, 7, 9 ], # pressure + [ 0, 1, 1, 2, 2 ] # humidity + ] + + +8. nn.Conv2d() + -> fc = nn.Conv2d(Input_Channel, Out_Channel) + -> Input shape = [Batch_size, channel, Height, Width] + -> Output_shape = [Batch_size, channel, Height, Width] + +9. 
kernel_size + -> Conv1d() + -> x = [2, 4, 6, 8, 10] + -> 1 input and 1 output + -> kernel_size = 1, weight = 0.5, no bias + Input : shape=(B=1, C=1, L=5) + Output: shape=(B=1, C=1, L=5) + y = [2*0.5, 4*0.5, 6*0.5, 8*0.5, 10*0.5] + = [1.0, 2.0, 3.0, 4.0, 5.0] + + -> kernel_size = 3, weights = [0.2, 0.5, 0.3], no bias + Apply to [0, 2, 4]: + y[0] = 0*0.2 + 2*0.5 + 4*0.3 = 0 + 1.0 + 1.2 = 2.2 + + Apply to [2, 4, 6]: + y[1] = 2*0.2 + 4*0.5 + 6*0.3 = 0.4 + 2.0 + 1.8 = 4.2 + + Apply to [4, 6, 8]: + y[2] = 4*0.2 + 6*0.5 + 8*0.3 = 0.8 + 3.0 + 2.4 = 6.2 + + Apply to [6, 8, 10]: + y[3] = 6*0.2 + 8*0.5 + 10*0.3 = 1.2 + 4.0 + 3.0 = 8.2 + + Apply to [8, 10, 0]: + y[4] = 8*0.2 + 10*0.5 + 0*0.3 = 1.6 + 5.0 + 0 = 6.6 + -> y = [2.2, 4.2, 6.2, 8.2, 6.6] + + + + +# ====== Class Knowledge ====== +1. +class RegDGCNN_SurfaceFields(nn.Module): + -> This defines a custom Pytorch Neural Network + -> It inherits from nn.Moudle, which is the base class for all PyTorch models + + #! + super().__init__() + -> Calls the parent class nn.Module constructor to set up internal PyTorch machinery + +#!---------------------------------------------------------------------------- + def forward(self, x): + x0 = get_graph_feature(x, k=self.k) # (batch_size, 3, num_points) -> (batch_size, 3*2, num_points, k) + t = self.transform_net(x0) # (batch_size, 3, 3) + x = x.transpose(2, 1) # (batch_size, 3, num_points) -> (batch_size, num_points, 3) + x = torch.bmm(x, t) # (batch_size, num_points, 3) * (batch_size, 3, 3) -> (batch_size, num_points, 3) + x = x.transpose(2, 1) # (batch_size, num_points, 3) -> (batch_size, 3, num_points) + -> The above 5 lines is a module-like stuff + -> t = self.transform_net(x0) uses edge features to predict a 3*3 transformation matrix per batch sample + -> x = torch.bmm(x, t) applies this matrix to all points to align them before further feature extraction + -> .bmm() is a matrix multiply function + -> (B, N, 3) * (B, 3, 3) -> (B, 3, 3) + + + + +2. 
+class Transform_Net(nn.Module): + -> This modules learns a transformation matrix to align the input point cloud or local features + + 1. def __init__(self, args): + #! + self.k = 3 + -> Final transformation output is a 3*3 matrix + + #! + self.bn1 = nn.BatchNorm2d(64) + -> 64 means: the input has 64 channels + -> It is used after a "Conv2d" layer that output shape maybe (batch_size, 64, height, width) + + #! + nn.LeakyReLU(negative_slope=0.2) + -> It is an activation function in PyTorch + -> Introduces non-linearity into your model + -> Allows neural networks to learn complex patterns e.g. curves, edges, relationships + -> example, alpha = 0.2 + -> f(x) = x if x > 0 + = α * x if x ≤ 0 + -> x = torch.tensor([-3.0, -1.0, 0.0, 1.0, 3.0]) + act = nn.LeakyReLU(negative_slope=0.2) + y = act(x) + print(y) # tensor([-0.6000, -0.2000, 0.0000, 1.0000, 3.0000]) + + #! + nn.Conv2d(6, 64, kernel_size=1, bias=False) + -> Creates a 2D convolutional layer in PyTorch + -> It processes input data and learns to extract meaningful features from it + -> 6: Input channel + -> 64: Output channel + -> kernel_size=1: Use a 1*1 filter i.e. pointwise convolution + -> bias=False: Don not add a learnable bias. Often used with BatchNorm after + + #! + self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=False), + self.bn1, + nn.LeakyReLU(negative_slope=0.2)) + -> Define a layer: self.conv1 + -> nn.Sequential() executes the layer in order + -> This "conv1" layer executes this Sequence: Input -> Conv2d -> BatchNorm2d -> LeakyReLU -> Output + + #! + self.linear1 = nn.Linear(1024, 512, bias=False) + self.bn4 = nn.BatchNorm1d(512) + -> Fully Connected Layer + -> Use BatchNorm make data more correct + + #! + self.transform = nn.Linear(256, 3*3) + -> Create a fullt connected layer + -> This is the final layer in "Transform_Net" that outputs a learned transformation matrix + -> Input: (B, 256) + -> Output: (B, 9) -> reshape to -> (B, 3, 3) + + #! 
+ init.constant_(self.transform.weight, 0) + -> Reset all weights to 0 + -> So initially, the layer does not learn anything yet + -> Its output only depends on the bias + -> This ensures the transformation starts from Identity + + #! + init.eye_(self.transform.bias.view(3, 3)) + -> self.transform.bias is a flat vector of shape (9, ) + -> view(3,3) reshape it into a matrix + -> init.eye() fills it with an identity matrix + [1, 0, 0] + [0, 1, 0] + [0, 0, 1] + 2. def forward(self, x): + -> x is the input data + -> x.shape = (batch_size, 128, num_points, k) + + #! + batch_size = x.size(0) + -> Get the size of the first dimension of the tensor x + + #! + x = x.max(dim=-1, keepdim=False)[0] # (batch_size, 128, num_points, k) -> (batch_size, 128, num_points) + -> .max() function retuns two things + -> (values, index) = x.max(dim=, keepdim=) + -> values: the maximux value along the given dimension + -> index : the positon of the maximum value + -> dim=-1 + -> -1 refers to the last dimension + -> In this case: k = neighbors + -> [0] + -> (values, indices) + -> returns the max values + -> keepdim=False + -> Removes the reduced dimension + -> True: keeps the reduced dimension with size 1 + -> [batch_size, 128, numpoints, 1] + #! + x = F.leaky_relu(self.bn4(self.linear1(x)), negative_slope=0.2) # (batch_size, 1024) -> (batch_size, 512) + -> Sequence: self.linear1(x) -> self.bn4 -> F.leadky_relu(x, slope) + +#!---------------------------------------------------------------------------- + def get_graph_feature(x, k=20, idx=None, dim9=False): + + -> x = x.view(batch_size, -1, num_points) + -> In PyTorch, x.view() reshapes a tensor without changing its data + -> Similar to NumPy .reshape() + -> Must specify the new shape using dimensions + -> -1 + -> Auto-computes this dimension size + -> Make sure feature dimension is flexible + -> Example: + -> x = torch.randn(2, 3, 4) + -> Total element: 2 × 3 × 4 = 24 + -> The -1 size: 2 × ? × 4 = 24 + => ? 
= 3 + +#!------------------------ + idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points + -> torch.arange(0, batch_size, device=device) + -> Create a "1D" tensor of integers from '0' not including 'batch_size' + -> Result: ([0, 1, 2, ..., batch_size-1]) + -> .view(-1, 1, 1) + -> Shape changes from [batch_size-1] to (batch_size-1, 1, 1) + -> * num_points + -> Each batch has "num_points" points + -> First batch: indices [0 , num_points-1] + -> second batch: indices [num_points-1, 2*num_points-1] + -> ... + +#!------------------------ + idx = idx + idx_base + -> idx_base is an offset + -> The index is changed by offset + -> Example: + -> idx before adjustment (batch 0): [0, 1, 2] + idx before adjustment (batch 1): [0, 1, 2] + -> batch 0 offset: 0 + → indices stay [0, 1, 2] + batch 1 offset: num_points=5 + → indices become [5, 6, 7] + +#!------------------------ + idx = idx.view(-1) + -> Flatten this tensor into a "1D" vector + -> To use these indices to directly index a flat array + -> idx.shape = (batch_size * num_points * k) + +#!------------------------ + x = x.transpose(2, 1).contiguous() # (batch_size, num_points, num_dims) + -> After .transpose(), the memory maybe not continuous + -> .contiguous() make sure continuous + -> Without it, maybe shows RuntimeError + -> RuntimeError: view size is not compatible with input tensor's size and stride... + +#!------------------------ + feature = x.view(batch_size * num_points, -1)[idx, :] + -> Now I can find each x_j for each x_i + -> "feature" holds the neighbor points x_j + -> feature.shape = (batch_size * num_points * k, num_dims) + -> x.view() + -> Reshape x to (batch_size*num_points, num_dims) + -> "-1" stands for the last dimension + -> [idx, :] + -> use "idx" to select specific points in this flattened array + -> Specific points i.e. 
the "K nearest" points + -> Each row is a point, the column is the point coordinates + -> Select the rows from feature whose indices are given by 'idx' + -> : + -> Means select all columns for each row + +#!------------------------ + feature = feature.view(batch_size, num_points, k, num_dims) + -> Reshape "feature" + -> Test + -> logging.info(f"feature batch 0, point 0, k 0: {feature[0, 0, 0,:]}") + -> logging.info(f"feature batch 0, point 0, k 1: {feature[0, 0, 1,:]}") + -> logging.info(f"feature batch 0, point 2, k 0: {feature[0, 2, 0,:]}") + -> logging.info(f"feature batch 0, point 2, k 1: {feature[0, 2, 1,:]}") + -> point i with 'k' neareast point + -> The last dimension is the 'k' nearest point coordinate +#!------------------------ + x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1) + -> Reshape "x" to [batch_size, num_points, k, num_dims] + -> Repeted k times for each points + +#!------------------------ + feature = torch.cat((feature - x, x), dim=3).permute(0, 3, 1, 2).contiguous() + -> feature - x + -> means x_j - x_i + -> torch.cat((feature - x, x), dim=3) + -> dim = 3 means "0, 1, 2, 3" slot for shape size + -> build a pair + -> (x_j - x_i, x_i) + -> 3+3 + -> After cat + -> num_dim(diff) + num_dim(center point) = 2 * num_dims + -> feature.shape = (batch_size, num_points, k, 2*num_dims) + -> .permute(0, 3, 1, 2) + -> feature.shape = (batch_size, 2*num_dims, num_points, k) + + +#!---------------------------------------------------------------------------- + def knn(x, k): + -> inner = -2 * torch.matmul(x.transpose(2, 1), x) + -> inner.shape = (batch_size, num_points, num_points) + -> It is an inner product operation + -> Low dimension size Example: + -> x.shape = (1, 2, 3) + -> x = torch.tensor([[[1.0, 2.0, 3.0], # x coordinates [4.0, 5.0, 6.0]]]) # y coordinates + -> x.transpose(2, 1) + -> [ + [[1.0, 4.0], # point 0 + [2.0, 5.0], # point 1 + [3.0, 6.0]] # point 2 + ] + -> inner = torch.matmul(x.transpose(2, 1), x) + -> inner.shape = (1, 
3, 3) + -> [ + [P_0*P_0, P_0*P_1, P_0*P_2], + [P_1*P_0, P_1*P_1, P_1*P_2], + [P_2*P_0, P_2*P_1, P_2*P_2] + ] + -> Further step + -> inner[0][0] = [P_0*P_0, P_0*P_1, P_0*P_2] + -> inner[0][0][0] = P_0*P_0 + + -> Test code + -> logging.info(f"inner.shape: {inner.shape}") + -> logging.info(f"inner[0][0] value: {inner[0, 3, :]}") + +#!------------------------ + xx = torch.sum(x ** 2, dim=1, keepdim=True) + -> xx.shape = (batch_size, 1, num_points) + -> Compute the squared norm of each point, ||x|| = x_1^2 + x_2*2 + x_3*2 + -> dim=1 + -> dim argu controls which axis you sum along + -> keepdim=True + -> keep the summation dimension with size 1 + -> i.e. xx.shape = (1, 1, num_points) + -> Examples: xx.shape = [1, 1, 3] + -> xx = [[P0_norm, P1_norm, P2_norm]] + -> xx_T = xx.transpost(2,1) + -> xx_T.shape = [1, 3, 1] + -> [[P0_norm], [P1_norm], [P2_norm]] + +#!------------------------ + pairwise_distance = -xx - inner - xx.transpose(2, 1) + -> pairwise_distance is a negative value between points + -> And I think it is bullshit about operating dimension in PyTorch!!! + -> "I get it!" The system will automatically broadcast into same shape + -> xx.shape = (batch_size, 1 , num_points) + -> inner.shape = (batch_size, num_points, num_points) + -> xx_T.shape = (batch_size, num_points, 1) + -> pair_distance.shape = (batch_size, num_points, num_points) + -> Example: + -> pair_distance.shape = (1 ,2, 2) + -> [[P0-P0, P0-P1], [P1-P0], [P1-P1]] + +#!------------------------ + idx = pairwise_distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k) + -> Select the top K largest values along dimension -1 i.e. The last dimension + -> The distance is all negative, The largest value means nearest point + -> .topk(k=k, dim=-1) + -> Example: pairwise_distance[0, 0] = [-1, -4, -10] + -> pairwise_distance[0, 0].topk(k=2) + -> Return value is [-1, -4] + -> [1] + -> values, indices = tensor.topk(...) 
+ -> We just need indices + -> Checkout + -> value = pairwise_distance.topk(k=k, dim=-1)[0] # (batch_size, num_points, k) + -> idx = pairwise_distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k) + -> logging.info(f"point 3: {value[0,3,:], idx[0,3,:]}") + + +#!------------------------ + + + + + + + + + + diff --git a/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_run_pipeline.py b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_run_pipeline.py new file mode 100644 index 0000000..d3e13bd --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_run_pipeline.py @@ -0,0 +1,57 @@ +# ======== Hyperparameter ======== +1. + self.k = args['k'] + -> k + -> Number of nearnest neighbors(for graph edge construction) + +2. + +# ======== function usage ======== + +#!------------------------- + for key, value in env.items(): + print(f"{key} = {value}") + +key : the name of environment variable +value : key's value +f"{key} = {value}" : f-strings(formatted strings) + +#!------------------------- + stages = args.stages.splits(',') if ',' in args.stages else [args.exp_name] + This is a ternary expression + stages = if else + + -> e.g. --stage "preprocess, train" + -> stages = ['preprocess, train'] + + -> e.g. --stage "all" + -> stages = [all] + +#!------------------------- + results = {} + -> create an empty directory + -> e.g. results = { + "preprocess": True, + "train": flase, + "evaluate": True + } + -> e.g. results.get('train', False) + -> resluts have train, get True + whether get False + +#!------------------------- + logging.info + -> Need explicit declaration + + + +#!------------------------- +def preprocess_data(args): + try: + # some code ... 
+ except Exception as e: + logging.error(f"Preprocessing failed with error: {e}") + return False + -> If any error happens in the try block, Python immediately jumps to except block instead of crashing + -> The error object is saved as e + diff --git a/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_train.py b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_train.py new file mode 100644 index 0000000..e8bc57f --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_train.py @@ -0,0 +1,344 @@ +# ============ Function Usage ============ +import torch +import torch.distributed as dist +import torch.multiprocessing as mp + +# Import modules +from model_pressure import RegDGCNN_pressure + + +# DDP: Distributed Data Parallel +1.---- + world_size = len(gpu_list.split(',')) +#! + gpu_list -> "0, 1, 2" + gpu_list.split(',') -> ['0', '1', '2'] + len(gpu_list.split(',')) -> 3 + +2.---- + exp_dir = os.path.join('experiments', args.exp_name) +#! + Build a path "./experiments/exp_name" + +3.---- + os.makedirs(exp_dir, exist_ok=True) +#! + Create the directory "exp_dir" if it doesn't already exist + +4.---- + mp.spawn(train_and_evaluate, args=(world_size, args), nprocs=world_size, join=True) +#! + train_and_evaluate(rank, world_size, args) + # rank : which GPU this process is using + # world_size : total number of GPUS + # args: your parsed command-line arguments +#! + mp.spawn(...) + -> + train_evaluate(rank=0, world_size= , args=args) + train_evaluate(rank=1, world_size= , args=args) + train_evaluate(rank=2, world_size= , args=args) + train_evaluate(rank=3, world_size= , args=args) + ... + +5.---- + dist.init_process_group(backend='nccl', init_method='env://', world_size=world_size, rank=rank) +#! + Starts the communication backend for DDP + # nccl: NVDIA backend for multi-GPU + # evv:// Uses evvironmen variables(e.g. RANK, WORLD_SIZE, etc) + +6.----- + torch.cuda.set_device(local_rank) +#! 
+ Each process uses a single GPU
+ Set the GPU this process will use
+
+7.-----
+ args = vars(args)
+#!
+ Convert it to a regular dictionary
+ e.g. {'epoch': 10, 'lr': 0.0001}
+
+8.-----
+ model = RegDGCNN_pressure(args).to(local_rank)
+#!
+ RegDGCNN_pressure(args): Creates an instance of your custom model class
+ .to(local_rank): Moves the model to the current correct GPU
+#!
+ For details, see Usage_model_pressure.py
+
+10.----
+ model = torch.nn.parallel.DistributedDataParallel(
+ model,
+ device_ids=[local_rank],
+ find_unused_parameters=True,
+ output_device=local_rank
+ )
+#!
+ Wrap it with DDP so PyTorch handles multi-GPU
+ synchronization and data parallelism
+#!
+ model
+ The model already moved to the local GPU
+#!
+ device_ids=[local_rank]
+ Restrict this process to use only one GPU
+#!
+ find_unused_parameters=True
+ It tells PyTorch "Some layers in my model might not be used every time I call forward()
+ -> so please handle that correctly"
+ For details, see the Usage_model_pressure.py forward() function
+
+#!
+ output_device=local_rank
+ Ensures outputs go to the same GPU as inputs
+
+11.----
+ criterion = torch.nn.MSELoss()
+#!
+ Loss function:
+ This sets up Mean Squared Error (MSE) as the loss function, commonly used for regression problems
+ It measures the average of the squares of the differences between predicted and actual values:
+ MSE = 1/n * sum((y_i - y)^2), i = 1, ..., n
+
+12.----
+ optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
+#!
+ Uses Adam (Adaptive Moment Estimation), a popular optimizer that adjusts learning rates for each parameter
+#!
+ model.parameters()
+ ->Tells the optimizer which parameters to update
+#!
+ lr = args.lr
+ ->learning rate
+#!
+ weight_decay=1e-4
+ ->adds L2 regularization to reduce overfitting
+
+13.----
+ scheduler = ReduceLROnPlateau(optimizer, 'min', patience=10, factor=0.1, verbose=True)
+#!
+ Automatically reduces the learning rate when the validation loss plateaus(stops improving) +#! + 'min' + ->Try to minimize val loss + -> By default 1e-4 + +#! + patience=10 + ->Wait for 10 epochs without improvement + +#! + factor=0.1 + ->Multiplies LR by 0.1 + +#! + verbose=True + -> print a message whenever the learning rate changes +#! + e.g. + Epoch 1 - Validation Loss: 0.470 + LR : 0.01 + Epoch 2 - Validation Loss: 0.471 + LR : 0.01 + Epoch 3 - Validation Loss: 0.471 + LR : 0.01 + Epoch 4 - Validation Loss: 0.470 + LR : 0.01*0.1 + +14.---- + train_dataloader.sampler.set_epoch(epoch) + -> DDP needed function + -> Helps ensure different GPU processes don't get the same data every epoch + +15.---- + train_loss = train_one_epoch(model, train_dataloader, optimizer, criterion, local_rank) + -> function for training + +#! + model.train() + -> bulit-in function for nn.Module API + -> set the model to training mode + +#! + for data, targets in tqdm(train_dataloader, desc="[Training]"): + -> tqdm is just a process bar + -> [Training]: 56%|█████████████████▌ | 50/90 [00:05<00:04, 8.23it/s] + -> Tells you are on batch 50/90 + -> Is the same as " for data, targets in train_dataloader " + +#! + data, targets = data.squeeze(1).to(local_rank), targets.squeeze(1).to(local_rank) + -> .to(local_rank) sent data to GPU "local_rank" + -> .squeeze(1) rm the element "1" in the data and target + -> I do not know "1" stands for what + +#! + targets = (targets - PRESSURE_MEAN) / PRESSURE_STD + -> Normalizes the ground truth targets(pressure values) +PRESSURE_MEAN = -94.5 +PRESSURE_STD = 117.25 + +#! + optimizer.zero_grad() + -> Clears previous gradients stored in the model(from last batch) + +#! + outputs = model(data) + -> outputs could be predicted pressure values + -> Triggers DDP _call_() method + -> DDP calls forward(data) function + -> forward() defined in model_pressure.py + -> Equivalent to outputs = model.forward(data) + +#! 
+ loss = criterion(outputs.squeeze(1), targets) + -> MSE function for Pressure part + -> loss = ((outputs - targets)**2).mean() + +#! + loss.backward() + -> Computes gradients of the loss w.r.t all model parameters + -> See model_pressure.py backward() function + +#! + optimizer.step() + -> Updates the model weights using the computed gradients in loss.backward() + +#! + total_loss += loss.item() + -> loss.item() converts the scalar tensor to a python number + +#! + return total_loss / len(train_dataloader) + -> Returns the average loss per batch over the entire epoch + -> len(train_dataloader) the number of batches passed from the command-line + +16.---- + total_params = sum(p.numel() for p in model.parameters() if p.requires_grad) + -> Calculates the total number of trainable parameters in a PyTorch model + + #! + model.parameters() + -> Returns an iterator over all parameters + + #! + p.requires_grad + -> True: PyTorch will update it during training + -> False: Leave it unchanged + + #! + p.numel() + -> Return the number of trainable parameters in each layer + -> example + p = torch.randn(3, 4) + p.numel() # → 12 (because 3 rows × 4 columns = 12 elements) + +17.---- + train_dataloader.sampler.set_epoch(epoch) + -> Ensure different GPUs shuffle differently at each epoch + + +18.---- + def train_one_epoch(...) + #! + targets = (targets - PRESSURE_MEAN) / PRESSURE_STD + -> Normalize all "num_points" per sample + +19.---- + torch.save(model.state_dict(), best_model_path) + #! + model.state_dict () + -> Returns a dictionary containing all the model learnable parameters i.e. weights and biases + + #! + torch.save() + -> This function can save a PyTorch object to a file + +20.---- + model.load_state_dict(torch.load(best_model_path, map_location=f'cuda:{local_rank}')) + #! + torch.load() + -> load the model into a specified GPU + -> Return a dictionary + #! 
+ model.load_state_dict()
+ -> Assign that dictionary to your model parameters
+
+21.----
+ rel_l2 = torch.mean(torch.norm(normalized_outputs - normalized_targets, p=2, dim=-1) /
+ torch.norm(normalized_targets, p=2, dim=-1))
+ #!
+ -> L2_relative = norm(diff) / norm(targets)
+ -> Before mean() L2_relative is a tensor for every sample
+ -> We need to take the mean() over "batch_size" samples
+ -> i.e. just a scalar value for current batch
+
+13.----
+ batch_size = targets.size(0)
+ -> Get the first dimension
+
+14.----
+ outputs = model(data)
+ mse = criterion(normalized_outputs, normalized_targets)
+ -> outputs is a tensor
+ -> mse is a scalar value
+
+15.----
+ all_outputs.append(normalized_outputs.cpu())
+ -> normalized_outputs.cpu()
+ -> Move the GPU tensor to a CPU tensor
+ -> all_outputs.append()
+ -> Save all results to a big list
+
+16.----
+ dist.reduce(total_mse_tensor, dst=0, op=dist.ReduceOp.SUM)
+ -> dst=0
+ -> The target is rank=0
+ -> op=dist.ReduceOp.SUM
+ -> Sums all total_mse_tensor values onto rank=0
+
+17.----
+ ss_tot = np.sum((all_targets - np.mean(all_targets)) ** 2)
+ -> Measures total variance in the true data
+ -> i.e. 
How much the targets deviate from their mean
+ -> Total Sum of Squares
+ -> SS_tot = SUM((y_i - y_mean)^2)
+
+18.----
+ ss_res = np.sum((all_targets - all_outputs) ** 2)
+ -> Measures the error between predictions and true values
+ -> Residual Sum of Squares
+ -> SS_res = SUM((y_i - y_i_hat)^2)
+
+19.----
+ r_squared = 1 - (ss_res / ss_tot) if ss_tot > 0 else 0
+ -> R^2 score
+ -> coefficient of determination
+ -> Measures how well your predictions approximate the true data
+ -> R^2 = 1 - SS_res / SS_tot
+ -> Physical Meaning
+ -> R^2 = 1
+ -> Perfect Prediction
+ -> R^2 = 0
+ -> Predictions no better than using mean value
+ -> R^2 < 0
+ -> Predictions worse than using mean value
+
+20.----
+ MAE = 1/N * SUM(abs(y_i - y_i_hat))
+ -> Mean Absolute Error
+
+ MSE = 1/N * SUM((y_i - y_i_hat)^2)
+ -> Mean Squared Error
+
+
+
+
+
+
+
+
+
+
diff --git a/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_visualize_ply.py b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_visualize_ply.py
new file mode 100644
index 0000000..0bde004
--- /dev/null
+++ b/RegDGCNN_SurfaceFields/My_python_job/Usage_python/Usage_visualize_ply.py
@@ -0,0 +1,66 @@
+import numpy as np
+import matplotlib.pyplot as plt
+
+#!----------------
+ fig = plt.figure(figsize=(10, 5))
+ -> Create a new figure for plotting
+ -> set width to 10 inches, height to 5 inches
+
+#!----------------
+ ax = fig.add_subplot(121, projection='3d')
+ -> Add a subplot to your figure
+ -> 121
+ -> 1 row, 2 columns
+ -> "1st" The first subplot
+ -> projection='3d'
+ -> Create a "3D" plot
+
+#!----------------
+ p = ax.scatter(points[:, 0], points[:, 1], points[:, 2], c=true_p, cmap='jet', s=1)
+ -> Create a "3D" scatter plot on the first subplot "ax"
+ -> points[:,0]
+ -> x-coordinate of each point
+ -> points[:,1]
+ -> y-coordinate of each point
+ -> points[:,2]
+ -> z-coordinate of each point
+ -> c=true_p
+ -> Color each point using "true pressure value"
+ -> cmap='jet'
+ -> Use jet color map (blue -> green -> yellow -> red)
+ -> 
Low values: blue + -> Mid values: yellow + -> High values: red + -> s=1 + -> s stands for 'size' of the points in the scatter plot + -> s = 10 + -> Medium points + -> s = 50 + -> Big points + +#!---------------- + fig.colorbar(p, ax=ax) + -> Add a colorbar next to the first subplot + +#!---------------- + os.makedirs(visualization_path, exist_ok=True) + -> Create folder if it does not exist + +#!---------------- + plt.savefig(os.path.join(visualization_path, "visualization.png"), dpi=300) + -> dpi + -> dots for per inch + -> low resolution + -> dpi = 72 + -> common for screen display + -> high resolution + -> dpi = 300 + -> Used in scientific paper + +#!---------------- + plt.tight_layout() + -> Automatically adjusts the spacing between subplots and surrounding text to the prevent overlap + + + + diff --git a/RegDGCNN_SurfaceFields/My_python_job/data_loader.py b/RegDGCNN_SurfaceFields/My_python_job/data_loader.py new file mode 100644 index 0000000..4cdf133 --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/data_loader.py @@ -0,0 +1,204 @@ +# data_loader.py +""" +@author: Mohamed Elrefaie, mohamed.elrefaie@mit.edu + +Data loading utilities for the DrivAerNet++ dataset. + +This module provides functionality for loading and preprocessing point cloud data +with pressure field information from the DrivAerNet++ dataset. +""" + +import os +import numpy as np +import torch +from torch.utils.data import Dataset, Subset, DataLoader +import torch.distributed as dist +import pyvista as pv +import logging + +from colorama import Fore, Style + +class SurfacePressureDataset(Dataset): + """ + Dataset class for loading and preprocessing surface pressure data from DrivAerNet++ VTK files. + + This dataset handles loading surface meshes with pressure field data, + sampling points, and caching processed data for faster loading. + """ + + def __init__(self, root_dir: str, num_points: int, preprocess=False, cache_dir=None): + """ + Initializes the SurfacePressureDataset instance. 
+ + Args: + root_dir: Directory containing the VTK files for the car surface meshes. + num_points: Fixed number of points to sample from each 3D model. + preprocess: Flag to indicate if preprocessing should occur or not. + cache_dir: Directory where the preprocessed files (NPZ) are stored. + """ + self.root_dir = root_dir + self.vtk_files = [os.path.join(root_dir, f) for f in os.listdir(root_dir) if f.endswith('.vtk')] + self.num_points = num_points + self.preprocess = preprocess + self.cache_dir = cache_dir if cache_dir else os.path.join(root_dir, "processed_data") + + if not os.path.exists(self.cache_dir): + os.makedirs(self.cache_dir) + + def __len__(self): + return len(self.vtk_files) + + def _get_cache_path(self, vtk_file_path): + """Get the corresponding .npz file path for a given .vtk file.""" + base_name = os.path.basename(vtk_file_path).replace('.vtk', '.npz') + return os.path.join(self.cache_dir, base_name) + + def _save_to_cache(self, cache_path, point_cloud, pressures): + """Save preprocessed point cloud and pressure data into an npz file.""" + np.savez_compressed(cache_path, points=point_cloud.points, pressures=pressures) + + def _load_from_cache(self, cache_path): + """Load preprocessed point cloud and pressure data from an npz file.""" + data = np.load(cache_path) + point_cloud = pv.PolyData(data['points']) + pressures = data['pressures'] + return point_cloud, pressures + + def sample_point_cloud_with_pressure(self, mesh, n_points=5000): + """ + Sample n_points from the surface mesh and get corresponding pressure values. + + Args: + mesh: PyVista mesh object with pressure data stored in point_data. + n_points: Number of points to sample. + + Returns: + A tuple containing the sampled point cloud and corresponding pressures. + """ + if mesh.n_points > n_points: + indices = np.random.choice(mesh.n_points, n_points, replace=False) + else: + indices = np.arange(mesh.n_points) + logging.info(f"Mesh has only {mesh.n_points} points. 
Using all available points.") + + sampled_points = mesh.points[indices] + sampled_pressures = mesh.point_data['p'][indices] # Assuming pressure data is stored under key 'p' + sampled_pressures = sampled_pressures.flatten() # Ensure it's a flat array + + return pv.PolyData(sampled_points), sampled_pressures + + def __getitem__(self, idx): + vtk_file_path = self.vtk_files[idx] + cache_path = self._get_cache_path(vtk_file_path) + + # Check if the data is already cached + if os.path.exists(cache_path): + logging.info(f"Loading cached data from {cache_path}") + point_cloud, pressures = self._load_from_cache(cache_path) + else: + if self.preprocess: + logging.info(f"Preprocessing and caching data for {vtk_file_path}") + try: + mesh = pv.read(vtk_file_path) + except Exception as e: + logging.error(f"Failed to load VTK file: {vtk_file_path}. Error: {e}") + return None, None # Skip the file and return None + + point_cloud, pressures = self.sample_point_cloud_with_pressure(mesh, self.num_points) + + # Cache the sampled data to a new file + self._save_to_cache(cache_path, point_cloud, pressures) + else: + logging.error(f"Cache file not found for {vtk_file_path} and preprocessing is disabled.") + return None, None # Return None if preprocessing is disabled and cache doesn't exist + + point_cloud_np = np.array(point_cloud.points) + point_cloud_tensor = torch.tensor(point_cloud_np.T[np.newaxis, :, :], dtype=torch.float32) + pressures_tensor = torch.tensor(pressures[np.newaxis, :], dtype=torch.float32) + + return point_cloud_tensor, pressures_tensor + + +def create_subset(dataset, ids_file): + """ + Create a subset of the dataset based on design IDs from a file. 
+ + Args: + dataset: The full dataset + ids_file: Path to a file containing design IDs, one per line + + Returns: + A Subset of the dataset containing only the specified designs + """ + try: + with open(ids_file, 'r') as file: + subset_ids = [id_.strip() for id_ in file.readlines()] + subset_files = [f for f in dataset.vtk_files if any(id_ in f for id_ in subset_ids)] + subset_indices = [dataset.vtk_files.index(f) for f in subset_files] + if not subset_indices: + logging.error(f"No matching VTK files found for IDs in {ids_file}.") + return Subset(dataset, subset_indices) + except FileNotFoundError as e: + logging.error(f"Error loading subset file {ids_file}: {e}") + return None + + +def get_dataloaders(dataset_path: str, subset_dir: str, num_points: int, batch_size: int, + world_size: int, rank: int, cache_dir: str = None, num_workers: int = 4) -> tuple: + """ + Prepare and return the training, validation, and test DataLoader objects. + + Args: + dataset_path: Path to the directory containing VTK files + subset_dir: Directory containing train/val/test split files + num_points: Number of points to sample from each mesh + batch_size: Batch size for dataloaders + world_size: Total number of processes for distributed training + rank: Current process rank + cache_dir: Directory to store processed data + num_workers: Number of workers for data loading + + Returns: + A tuple of (train_dataloader, val_dataloader, test_dataloader) + """ + full_dataset = SurfacePressureDataset( + root_dir=dataset_path, + num_points=num_points, + preprocess=True, + cache_dir=cache_dir + ) + + train_dataset = create_subset(full_dataset, os.path.join(subset_dir, 'train_design_ids.txt')) + val_dataset = create_subset(full_dataset, os.path.join(subset_dir, 'val_design_ids.txt')) + test_dataset = create_subset(full_dataset, os.path.join(subset_dir, 'test_design_ids.txt')) + + # Distributed samplers for DDP + train_sampler = torch.utils.data.distributed.DistributedSampler( + train_dataset, 
num_replicas=world_size, rank=rank + ) + val_sampler = torch.utils.data.distributed.DistributedSampler( + val_dataset, num_replicas=world_size, rank=rank + ) + test_sampler = torch.utils.data.distributed.DistributedSampler( + test_dataset, num_replicas=world_size, rank=rank + ) + + train_dataloader = DataLoader( + train_dataset, batch_size=batch_size, sampler=train_sampler, + drop_last=True, num_workers=num_workers + ) + val_dataloader = DataLoader( + val_dataset, batch_size=batch_size, sampler=val_sampler, + drop_last=True, num_workers=num_workers + ) + test_dataloader = DataLoader( + test_dataset, batch_size=batch_size, sampler=test_sampler, + drop_last=True, num_workers=num_workers + ) + + return train_dataloader, val_dataloader, test_dataloader + + +# Constants for normalization +PRESSURE_MEAN = -94.5 +PRESSURE_STD = 117.25 diff --git a/RegDGCNN_SurfaceFields/My_python_job/evaluate.py b/RegDGCNN_SurfaceFields/My_python_job/evaluate.py new file mode 100644 index 0000000..b62179f --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/evaluate.py @@ -0,0 +1,284 @@ +# evaluate.py +""" + Evaluation and Visualization of prediction results from trained RedDGCNN models +""" + +import os +import torch +import argparse +import numpy as np +import matplotlib.pyplot as plt +from tqdm import tqdm +import logging +import pprint + +from data_loader import SurfacePressureDataset, PRESSURE_MEAN, PRESSURE_STD +from model_pressure import RegDGCNN_pressure +from utils import setup_logger, setup_seed, visualize_pressure_field, plot_error_distribution, calculate_metrics +from colorama import Fore, Style + +def parse_args(): + """ Parse command line arguments. 
""" + parser = argparse.ArgumentParser(description='Evaluate pressure prediction models on DrivAerNet++') + + # Basic settings + parser.add_argument('--exp_name', type=str, required=True, help='Experiment name for results folder') + parser.add_argument('--model_checkpoint', type=str, required=True, help='Path to model checkpoint') + parser.add_argument('--seed', type=int, default=1, help='Random seed') + + # Data settings + parser.add_argument('--dataset_path', type=str, required=True, help='Path to dataset') + parser.add_argument('--cache_dir', type=str, help='Path to cache directory') + parser.add_argument('--num_points', type=int, default=10000, help='Number of points to sample') + parser.add_argument('--sample_ids', type=str, help='Path to file with sample IDs to evaluate') + + + # Model settings + parser.add_argument('--dropout', type=float, default=0.4, help='Dropout rate (for model initialization)') + parser.add_argument('--emb_dims', type=int, default=1024, help='Embedding dimensions (for model initialization)') + parser.add_argument('--k', type=int, default=40, help='Number of nearest neighbors (for model initialization)') + parser.add_argument('--output_channels', type=int, default=1, help='Number of output channels') + + # Visualization settings + parser.add_argument('--visualize', action='store_true', help='Generate visualizations') + parser.add_argument('--num_vis_samples', type=int, default=5, help='Number of samples to visualize') + + return parser.parse_args() + +def Initialize_model(args, device): + """ + Initialize and Load the model. 
+ + Args: + args: Command line arguments + device: PyTorch device to use + + Returns: + Loaded model + """ + # Convert args to dictionary ONLY for the model initialization + args_dict = vars(args) + + # Use args_dict only for model initialization + model = RegDGCNN_pressure(args_dict).to(device) + + # Use original args for everything else + logging.info(f"Loading model form {args.model_checkpoint}") + state_dict = torch.load(args.model_checkpoint, map_location=device) + + # I think the if statement is just bull + # Remove 'module.' prefix from state dict keys if loading a DDP model to a non-DDP model + if list(state_dict.keys())[0].startswith('module.') and not hasattr(model, 'module'): + new_state_dict = {} + for k, v in state_dict.items(): + name = k[7:] if k.startswith('module.') else k + new_state_dict[name] = v + logging.info(f"********************") + model.load_state_dict(new_state_dict) + else: + model.load_state_dict(state_dict) + + return model + +def prepare_dataset(args): + """ + Prepare the dataset for evaluation + + Args: + args: Command line arguments + + Returns: + Prepared dataset and sample indices + """ + # Create dataset + dataset = SurfacePressureDataset( + root_dir = args.dataset_path, + num_points = args.num_points, + preprocess = False, + cache_dir = args.cache_dir + ) + + # *********************************** The if statement is not used, Just else statement + # Determine which samples to evaluate + if args.sample_ids: + try: + with open(args.sample_ids, 'r') as f: + sample_ids = [id_.strip() for id_ in f.readlines()] + + # Filter to only include VTK files that match the sample IDs + sample_files = [f for f in dataset.vtk_files if any(id_ in f for id_ in sample_ids)] + sample_indices = [dataset.vtk_files.index(f) for f in sample_files] + + logging.info(f"Found {len(sample_indices)} samples matching the provided IDs") + except Exception as e: + logging.error(f"Error loading sample IDs: {e}") + sample_indices = list(range(len(dataset))) + + 
else: + # Use all samples + sample_indices = list(range(len(dataset))) + + # If visualizing, limit to the specified number + if args.visualize and args.num_vis_samples < len(sample_indices): + sample_indices = sample_indices[:args.num_vis_samples] + + return dataset, sample_indices + +def evaluate_model(model, dataset, sample_indices, args): + """ + Evaluate the model on the selected samples and save raw prediction data. + + Args: + model: Trained model + dataset: dataset to evaluate on + sample_indices: Indices of samples to evaluate + args: Command line arguments + + Returns: + Dictionary of evaluation results + """ + model.eval() + device = next(model.parameters()).device + + all_metrics = [] + results_dir = os.path.join('results', args.exp_name) + os.makedirs(results_dir, exist_ok=True) + + # Directory for saving raw prediction data + data_dir = os.path.join(results_dir, 'prediction_data') + if args.visualize: + os.makedirs(data_dir, exist_ok=True) + + with torch.no_grad(): + for idx in tqdm(sample_indices, desc="Evaluating samples"): + # Get sample + data, targets = dataset[idx] + + # Skip invalid samples + if data is None or targets is None: + logging.warning(f"Skipped invalid sample at index {idx}") + continue + + # Prepare inputs + data = data.squeeze(1).to(device) + targets = targets.squeeze(1).to(device) + normalized_targets = (targets - PRESSURE_MEAN) / PRESSURE_STD + + # Forward pass + outputs = model(data) + normalized_outputs = outputs.squeeze(1) + + # Calculate metrics on normalized values + batch_metrics = calculate_metrics(normalized_targets.cpu().numpy(), normalized_outputs.cpu().numpy()) + all_metrics.append(batch_metrics) + + # Denormalize for data saving + outputs = normalized_outputs * PRESSURE_STD + PRESSURE_MEAN + + # Save raw prediction data if requested + if args.visualize: + vtk_file = dataset.vtk_files[idx] + sample_name = os.path.basename(vtk_file).replace('.vtk', '') + + logging.info(f"{Fore.GREEN}targets: 
{targets.shape}{Style.RESET_ALL}") + logging.info(f"{Fore.YELLOW}outputs: {outputs.shape}{Style.RESET_ALL}") + + # Extract points from the data tensor - correct format for later visualization + points = data.cpu().numpy().squeeze(0).transpose(1, 0) # (3, 10000) -> (10000, 3) + true_pressure_np = targets.cpu().numpy().squeeze() + pred_pressure_np = outputs.cpu().numpy().squeeze() + + logging.info(f"{Fore.GREEN}true_pressure_np.shape: {true_pressure_np.shape}{Style.RESET_ALL}") + logging.info(f"{Fore.YELLOW}pred_pressure_np.shape: {pred_pressure_np.shape}{Style.RESET_ALL}") + + # Save raw data for later visualization + output_data = { + 'points': points, + 'true_pressure_np': true_pressure_np, + 'pred_pressure_np': pred_pressure_np, + 'sample_name': sample_name, + 'vtk_file': vtk_file, + 'metrics': batch_metrics + } + + # Save to npz file + data_path = os.path.join(data_dir, f"{sample_name}_prediction_data.npz") + np.savez(data_path, **output_data) + logging.info(f"{Fore.MAGENTA}Saved raw prdiction data to {data_path}{Style.RESET_ALL}") + + # Calculate error metrics + error = np.abs(true_pressure_np - pred_pressure_np) + max_error = np.max(error) + mean_error = np.mean(error) + std_error = np.std(error) + + # Log some basic error statistics + #logging.info(f"Sample: {sample_name}") + #logging.info(f"\tMax Error: {max_error: .6f}") + #logging.info(f"\tMean Error: {mean_error: .6f}") + #logging.info(f"\tStd Error: {std_error: .6f}") + + # Aggregate metrics + agg_metrics = {} + for metric_name in all_metrics[0].keys(): + agg_metrics[f"{metric_name}_mean"] = np.mean([m[metric_name] for m in all_metrics]) + agg_metrics[f"{metric_name}_std"] = np.std([m[metric_name] for m in all_metrics]) + + + # Save metrics + metrics_file = os.path.join(results_dir, 'evaluation_metrics.txt') + with open(metrics_file, 'w') as f: + f.write(f"Evaluation Metrics for RegDGCNN\n") + f.write(f"Model Checkpoint: {args.model_checkpoint}\n") + f.write(f"Number of samples: 
{len(sample_indices)}\n\n") + + for metric_name, value in agg_metrics.items(): + f.write(f"{metric_name}: {value: .6f}\n") + + # Also save aggregated metrics as numpy file for easy loading + np.savez(os.path.join(results_dir, 'aggregated_metrics.npz'), **agg_metrics) + + logging.info(f"Evaluation complete, Results save to {results_dir}") + logging.info(f"{Fore.MAGENTA}Raw prediction data saved to {data_dir}{Style.RESET_ALL}") + + return agg_metrics + + +def main(): + """ main function to run the evaluation. """ + args = parse_args() + setup_seed(args.seed) + + # Set up logging + results_dir = os.path.join('results', args.exp_name) + os.makedirs(results_dir, exist_ok=True) + log_file = os.path.join(results_dir, 'evaluation.log') + setup_logger(log_file) + + logging.info(f"{Fore.RED}**************************** Starting evaluation of RegDGCNN model{Style.RESET_ALL}") + logging.info(f"Arguments:\n" + pprint.pformat(vars(args), indent=2)) + + # Determine device + device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + logging.info(f"Using device: {device}") + + # Initialize model + model = Initialize_model(args, device) + model.eval() + + # Prepare dataset + dataset, sample_indices = prepare_dataset(args) + + # Evaluate model + metrics = evaluate_model(model, dataset, sample_indices, args) + + # Log results + logging.info("Evaluation Results: ") + for metric_name, value in metrics.items(): + logging.info(f"{Fore.YELLOW}{metric_name}: {value: .6f}{Style.RESET_ALL}") + +if __name__ == "__main__": + main() + + + diff --git a/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/best_model_pth b/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/best_model_pth new file mode 100644 index 0000000..306ddb9 Binary files /dev/null and b/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/best_model_pth differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/best_model_tmp 
b/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/best_model_tmp new file mode 100644 index 0000000..49f1147 Binary files /dev/null and b/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/best_model_tmp differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/final_model_pth b/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/final_model_pth new file mode 100644 index 0000000..29aaf70 Binary files /dev/null and b/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/final_model_pth differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/final_model_tmp b/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/final_model_tmp new file mode 100644 index 0000000..d91653b Binary files /dev/null and b/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/final_model_tmp differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/test_metrics.txt b/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/test_metrics.txt new file mode 100644 index 0000000..b005d03 --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/test_metrics.txt @@ -0,0 +1,7 @@ +Test MSE: 0.098526 +Test MAE: 0.177215 +Max MAE: 17.375210 +Test R2: 0.9120 +Relative L2 Error: 0.295874 +Relative L1 error: 0.273636 +Total inference time: 0.72s for 54 samples diff --git a/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/training.log b/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/training.log new file mode 100644 index 0000000..257be43 --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/experiments/Train_Test/training.log @@ -0,0 +1,20855 @@ +2025-06-24 11:16:30,620 - INFO - args.exp_name : Train_Test +2025-06-24 11:16:30,620 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', 
subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=6, epochs=10, lr=0.001, num_workers=4, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-24 11:16:30,620 - INFO - Starting training with 1 GPUs +2025-06-24 11:24:59,837 - INFO - args.exp_name : Train_Test +2025-06-24 11:24:59,842 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=6, epochs=10, lr=0.001, num_workers=4, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-24 11:24:59,842 - INFO - Starting training with 1 GPUs +2025-06-24 16:00:07,086 - INFO - args.exp_name : Train_Test +2025-06-24 16:00:07,093 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=6, epochs=10, lr=0.001, num_workers=4, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-24 16:00:07,093 - INFO - Starting training with 1 GPUs +2025-06-24 16:00:07,107 - INFO - points: shape = (10000, 3), dtype = float32 +2025-06-24 16:00:07,108 - INFO - pressures: shape = (10000,), dtype = float32 +2025-06-25 09:37:48,977 - INFO - args.exp_name : Train_Test +2025-06-25 09:37:48,978 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, 
dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=6, epochs=10, lr=0.001, num_workers=4, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-25 09:37:48,978 - INFO - Starting training with 1 GPUs +2025-06-25 09:37:48,995 - INFO - points: shape = (10000, 3), dtype = float32 +2025-06-25 09:37:48,996 - INFO - pressures: shape = (10000,), dtype = float32 +2025-06-25 09:39:06,417 - INFO - args.exp_name : Train_Test +2025-06-25 09:39:06,418 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=6, epochs=10, lr=0.001, num_workers=4, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-25 09:39:06,418 - INFO - Starting training with 1 GPUs +2025-06-25 09:43:55,282 - INFO - args.exp_name : Train_Test +2025-06-25 09:43:55,283 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=6, epochs=10, lr=0.001, num_workers=4, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-25 09:43:55,285 - INFO - Starting training with 1 GPUs +2025-06-25 09:43:58,808 - INFO - Total 
trainable parameters: 1437705 +2025-06-25 10:09:51,283 - INFO - args.exp_name : Train_Test +2025-06-25 10:09:51,285 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=6, epochs=10, lr=0.001, num_workers=4, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-25 10:09:51,285 - INFO - Starting training with 1 GPUs +2025-06-25 10:09:55,578 - INFO - Total trainable parameters: 1437705 +2025-06-25 10:38:38,659 - INFO - args.exp_name : Train_Test +2025-06-25 10:38:38,660 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=6, epochs=10, lr=0.001, num_workers=4, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-25 10:38:38,660 - INFO - Starting training with 1 GPUs +2025-06-25 10:38:42,390 - INFO - Total trainable parameters: 1437705 +2025-06-25 10:38:42,390 - INFO - shape: , torch.Size([64]) +2025-06-25 10:38:42,516 - INFO - values: , Parameter containing: +tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], device='cuda:0', + requires_grad=True) +2025-06-25 10:38:42,516 - INFO - shape: , torch.Size([64]) +2025-06-25 10:38:42,516 - INFO - 
values: , Parameter containing: +tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,516 - INFO - shape: , torch.Size([128]) +2025-06-25 10:38:42,522 - INFO - values: , Parameter containing: +tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1.], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,522 - INFO - shape: , torch.Size([128]) +2025-06-25 10:38:42,522 - INFO - values: , Parameter containing: +tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0.], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,522 - INFO - shape: , torch.Size([1024]) +2025-06-25 10:38:42,523 - INFO - values: , Parameter containing: +tensor([1., 1., 1., ..., 1., 1., 1.], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,526 - INFO - shape: , 
torch.Size([1024]) +2025-06-25 10:38:42,526 - INFO - values: , Parameter containing: +tensor([0., 0., 0., ..., 0., 0., 0.], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,526 - INFO - shape: , torch.Size([64, 6, 1, 1]) +2025-06-25 10:38:42,542 - INFO - values: , Parameter containing: +tensor([[[[ 0.2104]], + + [[-0.1802]], + + [[-0.0791]], + + [[ 0.1916]], + + [[-0.3843]], + + [[ 0.2448]]], + + + [[[-0.0840]], + + [[ 0.2077]], + + [[ 0.0568]], + + [[-0.0500]], + + [[ 0.1132]], + + [[ 0.0201]]], + + + [[[ 0.1491]], + + [[-0.1591]], + + [[-0.0298]], + + [[-0.0368]], + + [[ 0.0592]], + + [[-0.0016]]], + + + [[[ 0.3569]], + + [[ 0.1270]], + + [[-0.1520]], + + [[-0.2466]], + + [[-0.0684]], + + [[-0.1761]]], + + + [[[-0.1308]], + + [[ 0.0195]], + + [[ 0.2434]], + + [[ 0.2219]], + + [[-0.3991]], + + [[ 0.2531]]], + + + [[[ 0.1140]], + + [[ 0.3872]], + + [[ 0.2695]], + + [[-0.3720]], + + [[-0.3882]], + + [[-0.1969]]], + + + [[[ 0.3585]], + + [[-0.0680]], + + [[ 0.1747]], + + [[-0.1897]], + + [[ 0.4006]], + + [[-0.1727]]], + + + [[[ 0.3062]], + + [[ 0.0048]], + + [[-0.2151]], + + [[ 0.2098]], + + [[-0.2167]], + + [[ 0.1201]]], + + + [[[-0.1179]], + + [[-0.0448]], + + [[-0.3925]], + + [[-0.1946]], + + [[ 0.2215]], + + [[-0.0992]]], + + + [[[ 0.4066]], + + [[ 0.3272]], + + [[-0.0191]], + + [[-0.2725]], + + [[ 0.2486]], + + [[ 0.1267]]], + + + [[[-0.2639]], + + [[ 0.2652]], + + [[ 0.2478]], + + [[ 0.3621]], + + [[-0.2288]], + + [[-0.0672]]], + + + [[[-0.0079]], + + [[ 0.0596]], + + [[-0.3098]], + + [[-0.2897]], + + [[ 0.2221]], + + [[-0.0957]]], + + + [[[ 0.1994]], + + [[ 0.0233]], + + [[ 0.1340]], + + [[ 0.0898]], + + [[ 0.1484]], + + [[ 0.2024]]], + + + [[[-0.3781]], + + [[ 0.2055]], + + [[-0.2870]], + + [[-0.3080]], + + [[ 0.0248]], + + [[-0.0696]]], + + + [[[ 0.2398]], + + [[-0.2364]], + + [[-0.3629]], + + [[ 0.2971]], + + [[-0.0605]], + + [[ 0.2296]]], + + + [[[ 0.1312]], + + [[-0.3061]], + + [[ 0.0820]], + + [[ 0.0981]], + + [[-0.2733]], + + [[-0.1937]]], + + 
+ [[[ 0.1392]], + + [[ 0.0731]], + + [[-0.1737]], + + [[-0.1236]], + + [[ 0.3739]], + + [[-0.0755]]], + + + [[[ 0.2302]], + + [[ 0.1768]], + + [[-0.2639]], + + [[-0.3472]], + + [[ 0.3919]], + + [[ 0.0213]]], + + + [[[ 0.2798]], + + [[ 0.0846]], + + [[ 0.1313]], + + [[ 0.3049]], + + [[ 0.3871]], + + [[-0.2709]]], + + + [[[ 0.0510]], + + [[ 0.3047]], + + [[ 0.2957]], + + [[ 0.2536]], + + [[-0.2955]], + + [[-0.2940]]], + + + [[[-0.2469]], + + [[ 0.0513]], + + [[ 0.4069]], + + [[-0.2579]], + + [[ 0.2175]], + + [[-0.2260]]], + + + [[[-0.3838]], + + [[-0.0868]], + + [[ 0.2352]], + + [[ 0.3790]], + + [[-0.2535]], + + [[ 0.0886]]], + + + [[[ 0.3523]], + + [[ 0.2705]], + + [[ 0.2544]], + + [[ 0.2901]], + + [[ 0.2582]], + + [[ 0.1054]]], + + + [[[-0.2792]], + + [[-0.3428]], + + [[-0.1871]], + + [[-0.0475]], + + [[-0.2502]], + + [[ 0.1494]]], + + + [[[ 0.1263]], + + [[-0.0924]], + + [[ 0.1569]], + + [[ 0.1319]], + + [[ 0.2493]], + + [[ 0.2749]]], + + + [[[-0.1383]], + + [[ 0.3989]], + + [[-0.0472]], + + [[-0.0140]], + + [[-0.3853]], + + [[-0.2628]]], + + + [[[-0.2385]], + + [[-0.1746]], + + [[ 0.2903]], + + [[-0.1334]], + + [[-0.3051]], + + [[ 0.1571]]], + + + [[[ 0.1307]], + + [[ 0.2644]], + + [[-0.2113]], + + [[ 0.0885]], + + [[-0.1486]], + + [[-0.0917]]], + + + [[[-0.3253]], + + [[-0.1861]], + + [[-0.1250]], + + [[ 0.1746]], + + [[ 0.0746]], + + [[ 0.1008]]], + + + [[[ 0.4075]], + + [[ 0.3979]], + + [[ 0.2784]], + + [[ 0.0130]], + + [[-0.2824]], + + [[ 0.3190]]], + + + [[[-0.1021]], + + [[-0.0330]], + + [[-0.3517]], + + [[-0.0806]], + + [[-0.2633]], + + [[ 0.3751]]], + + + [[[-0.3529]], + + [[-0.3182]], + + [[-0.0139]], + + [[-0.2207]], + + [[ 0.1461]], + + [[-0.1571]]], + + + [[[-0.1917]], + + [[ 0.0231]], + + [[ 0.2955]], + + [[-0.2872]], + + [[ 0.1917]], + + [[ 0.2623]]], + + + [[[ 0.3993]], + + [[-0.2857]], + + [[ 0.0989]], + + [[-0.3019]], + + [[ 0.3485]], + + [[-0.1584]]], + + + [[[ 0.2459]], + + [[ 0.0122]], + + [[-0.0318]], + + [[-0.0130]], + + [[ 0.0694]], + + [[ 
0.1924]]], + + + [[[ 0.0655]], + + [[ 0.1245]], + + [[-0.3672]], + + [[ 0.2974]], + + [[ 0.3559]], + + [[ 0.3375]]], + + + [[[ 0.3018]], + + [[-0.2946]], + + [[-0.1513]], + + [[ 0.3600]], + + [[-0.3109]], + + [[ 0.3704]]], + + + [[[-0.3211]], + + [[-0.2876]], + + [[ 0.1996]], + + [[-0.2933]], + + [[-0.0935]], + + [[ 0.2970]]], + + + [[[ 0.3234]], + + [[ 0.3861]], + + [[-0.0828]], + + [[-0.3173]], + + [[ 0.4020]], + + [[-0.0870]]], + + + [[[-0.1680]], + + [[ 0.0995]], + + [[-0.2855]], + + [[ 0.2683]], + + [[ 0.2559]], + + [[-0.3239]]], + + + [[[-0.3353]], + + [[-0.0358]], + + [[ 0.1715]], + + [[-0.0118]], + + [[-0.2070]], + + [[ 0.0093]]], + + + [[[-0.3837]], + + [[-0.2885]], + + [[-0.2718]], + + [[ 0.3362]], + + [[ 0.3599]], + + [[-0.1387]]], + + + [[[ 0.0183]], + + [[ 0.1821]], + + [[ 0.0489]], + + [[-0.2044]], + + [[ 0.2355]], + + [[ 0.2510]]], + + + [[[-0.0237]], + + [[-0.0503]], + + [[ 0.3710]], + + [[ 0.3569]], + + [[-0.2315]], + + [[ 0.3994]]], + + + [[[ 0.1010]], + + [[-0.2712]], + + [[ 0.2235]], + + [[-0.3048]], + + [[ 0.3772]], + + [[-0.2625]]], + + + [[[ 0.1155]], + + [[ 0.1243]], + + [[ 0.0971]], + + [[ 0.3386]], + + [[-0.1696]], + + [[-0.1724]]], + + + [[[-0.3538]], + + [[-0.0168]], + + [[-0.1934]], + + [[ 0.0164]], + + [[-0.0837]], + + [[ 0.1355]]], + + + [[[ 0.3532]], + + [[ 0.1205]], + + [[-0.1315]], + + [[-0.2003]], + + [[-0.3561]], + + [[ 0.3435]]], + + + [[[-0.0773]], + + [[ 0.0824]], + + [[ 0.0151]], + + [[-0.2602]], + + [[ 0.2299]], + + [[ 0.2288]]], + + + [[[ 0.0206]], + + [[-0.2320]], + + [[-0.1735]], + + [[ 0.0056]], + + [[-0.2332]], + + [[-0.2278]]], + + + [[[-0.0609]], + + [[-0.1176]], + + [[ 0.1002]], + + [[-0.1073]], + + [[-0.0541]], + + [[-0.1558]]], + + + [[[-0.3731]], + + [[ 0.3560]], + + [[ 0.0779]], + + [[ 0.3651]], + + [[-0.0658]], + + [[-0.1026]]], + + + [[[-0.3838]], + + [[-0.1550]], + + [[ 0.2677]], + + [[-0.3999]], + + [[-0.1428]], + + [[ 0.2487]]], + + + [[[ 0.2353]], + + [[ 0.1507]], + + [[-0.0808]], + + [[-0.0309]], + + 
[[-0.0775]], + + [[-0.1505]]], + + + [[[ 0.1704]], + + [[-0.1391]], + + [[-0.2293]], + + [[ 0.0926]], + + [[-0.2951]], + + [[-0.0871]]], + + + [[[ 0.2086]], + + [[ 0.1526]], + + [[ 0.3476]], + + [[-0.2784]], + + [[ 0.0035]], + + [[-0.1206]]], + + + [[[-0.2283]], + + [[-0.2297]], + + [[ 0.1029]], + + [[ 0.0427]], + + [[-0.2084]], + + [[-0.2878]]], + + + [[[-0.3382]], + + [[ 0.4001]], + + [[-0.0696]], + + [[-0.0944]], + + [[-0.1501]], + + [[-0.2042]]], + + + [[[-0.3733]], + + [[-0.2397]], + + [[ 0.2495]], + + [[ 0.0895]], + + [[-0.1440]], + + [[-0.1543]]], + + + [[[ 0.2608]], + + [[ 0.2943]], + + [[ 0.3933]], + + [[ 0.1191]], + + [[ 0.1972]], + + [[-0.0328]]], + + + [[[-0.2387]], + + [[-0.4000]], + + [[ 0.2487]], + + [[-0.0593]], + + [[ 0.1677]], + + [[ 0.0201]]], + + + [[[-0.3810]], + + [[-0.2003]], + + [[ 0.1149]], + + [[-0.0826]], + + [[-0.3942]], + + [[ 0.1432]]], + + + [[[ 0.0157]], + + [[-0.1312]], + + [[ 0.1649]], + + [[ 0.0862]], + + [[-0.1506]], + + [[ 0.3667]]], + + + [[[ 0.2048]], + + [[ 0.0513]], + + [[-0.0784]], + + [[-0.3458]], + + [[ 0.3152]], + + [[-0.1053]]]], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,543 - INFO - shape: , torch.Size([128, 64, 1, 1]) +2025-06-25 10:38:42,546 - INFO - values: , Parameter containing: +tensor([[[[-0.0808]], + + [[ 0.0801]], + + [[ 0.0444]], + + ..., + + [[ 0.0592]], + + [[-0.0676]], + + [[ 0.0751]]], + + + [[[-0.0619]], + + [[-0.1105]], + + [[ 0.0419]], + + ..., + + [[-0.0286]], + + [[-0.0789]], + + [[ 0.0325]]], + + + [[[-0.0935]], + + [[ 0.0875]], + + [[ 0.0719]], + + ..., + + [[-0.0925]], + + [[-0.0367]], + + [[-0.0277]]], + + + ..., + + + [[[-0.0862]], + + [[ 0.1048]], + + [[ 0.0267]], + + ..., + + [[-0.0470]], + + [[ 0.1131]], + + [[ 0.0220]]], + + + [[[-0.1204]], + + [[-0.1204]], + + [[ 0.1186]], + + ..., + + [[ 0.1118]], + + [[-0.1240]], + + [[ 0.0192]]], + + + [[[ 0.1208]], + + [[-0.0812]], + + [[-0.1222]], + + ..., + + [[-0.0971]], + + [[ 0.0477]], + + [[-0.0167]]]], device='cuda:0', 
requires_grad=True) +2025-06-25 10:38:42,546 - INFO - shape: , torch.Size([1024, 128, 1]) +2025-06-25 10:38:42,548 - INFO - values: , Parameter containing: +tensor([[[-0.0402], + [ 0.0031], + [-0.0203], + ..., + [-0.0442], + [-0.0669], + [ 0.0306]], + + [[ 0.0071], + [-0.0399], + [ 0.0881], + ..., + [-0.0406], + [ 0.0702], + [ 0.0065]], + + [[ 0.0508], + [-0.0571], + [ 0.0168], + ..., + [ 0.0142], + [ 0.0077], + [ 0.0860]], + + ..., + + [[-0.0339], + [ 0.0635], + [ 0.0569], + ..., + [-0.0881], + [-0.0744], + [-0.0128]], + + [[-0.0349], + [-0.0430], + [ 0.0296], + ..., + [-0.0136], + [ 0.0481], + [-0.0138]], + + [[-0.0192], + [-0.0866], + [-0.0157], + ..., + [-0.0722], + [-0.0168], + [ 0.0350]]], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,548 - INFO - shape: , torch.Size([512, 1024]) +2025-06-25 10:38:42,549 - INFO - values: , Parameter containing: +tensor([[-0.0092, 0.0261, 0.0006, ..., -0.0145, 0.0162, 0.0020], + [ 0.0020, 0.0042, -0.0021, ..., 0.0125, -0.0189, 0.0091], + [ 0.0237, -0.0250, -0.0020, ..., -0.0119, 0.0226, -0.0298], + ..., + [ 0.0052, 0.0286, 0.0177, ..., 0.0105, -0.0227, -0.0107], + [-0.0106, 0.0071, -0.0291, ..., 0.0075, -0.0022, -0.0130], + [-0.0216, -0.0093, 0.0104, ..., -0.0306, -0.0012, -0.0045]], + device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,549 - INFO - shape: , torch.Size([512]) +2025-06-25 10:38:42,569 - INFO - values: , Parameter containing: +tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 
1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1.], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,570 - INFO - shape: , torch.Size([512]) +2025-06-25 10:38:42,570 - INFO - values: , Parameter containing: +tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 
+ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0.], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,571 - INFO - shape: , torch.Size([256, 512]) +2025-06-25 10:38:42,573 - INFO - values: , Parameter containing: +tensor([[ 0.0095, 0.0058, 
-0.0355, ..., -0.0400, 0.0399, 0.0425], + [ 0.0262, 0.0091, -0.0043, ..., 0.0402, -0.0261, 0.0349], + [-0.0207, -0.0270, 0.0177, ..., 0.0219, -0.0286, -0.0340], + ..., + [-0.0006, 0.0054, 0.0166, ..., 0.0381, 0.0380, 0.0036], + [-0.0321, -0.0181, 0.0030, ..., 0.0080, -0.0127, -0.0304], + [-0.0316, -0.0410, 0.0307, ..., -0.0245, 0.0197, 0.0032]], + device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,573 - INFO - shape: , torch.Size([256]) +2025-06-25 10:38:42,583 - INFO - values: , Parameter containing: +tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1.], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,583 - INFO - shape: , torch.Size([256]) +2025-06-25 10:38:42,583 - INFO - values: , Parameter containing: +tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,583 - INFO - shape: , torch.Size([9, 256]) +2025-06-25 10:38:42,584 - INFO - values: , Parameter containing: +tensor([[0., 0., 0., ..., 0., 0., 0.], + [0., 0., 0., ..., 0., 0., 0.], + [0., 0., 0., ..., 0., 0., 0.], + ..., + [0., 0., 0., ..., 0., 0., 0.], + [0., 0., 0., ..., 0., 0., 0.], + [0., 0., 0., ..., 0., 0., 0.]], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,585 - INFO - shape: , torch.Size([9]) +2025-06-25 10:38:42,585 - INFO - values: , Parameter containing: +tensor([1., 0., 0., 0., 1., 0., 0., 0., 1.], device='cuda:0', + requires_grad=True) +2025-06-25 10:38:42,585 - INFO - shape: , torch.Size([64]) +2025-06-25 10:38:42,588 - INFO - values: , Parameter containing: +tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], device='cuda:0', + requires_grad=True) +2025-06-25 10:38:42,588 - INFO - shape: , torch.Size([64]) +2025-06-25 10:38:42,589 - INFO - values: , Parameter containing: 
+tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,589 - INFO - shape: , torch.Size([64]) +2025-06-25 10:38:42,591 - INFO - values: , Parameter containing: +tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], device='cuda:0', + requires_grad=True) +2025-06-25 10:38:42,592 - INFO - shape: , torch.Size([64]) +2025-06-25 10:38:42,592 - INFO - values: , Parameter containing: +tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,592 - INFO - shape: , torch.Size([64]) +2025-06-25 10:38:42,595 - INFO - values: , Parameter containing: +tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], device='cuda:0', + requires_grad=True) +2025-06-25 10:38:42,595 - INFO - shape: , torch.Size([64]) +2025-06-25 10:38:42,595 - INFO - values: , Parameter containing: +tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 
0., 0., 0., 0.], + device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,595 - INFO - shape: , torch.Size([64]) +2025-06-25 10:38:42,598 - INFO - values: , Parameter containing: +tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], device='cuda:0', + requires_grad=True) +2025-06-25 10:38:42,598 - INFO - shape: , torch.Size([64]) +2025-06-25 10:38:42,598 - INFO - values: , Parameter containing: +tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,598 - INFO - shape: , torch.Size([64]) +2025-06-25 10:38:42,601 - INFO - values: , Parameter containing: +tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], device='cuda:0', + requires_grad=True) +2025-06-25 10:38:42,602 - INFO - shape: , torch.Size([64]) +2025-06-25 10:38:42,602 - INFO - values: , Parameter containing: +tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,603 - INFO - shape: , torch.Size([1024]) +2025-06-25 10:38:42,603 - INFO - values: , Parameter containing: +tensor([1., 1., 1., ..., 1., 1., 1.], device='cuda:0', 
requires_grad=True) +2025-06-25 10:38:42,603 - INFO - shape: , torch.Size([1024]) +2025-06-25 10:38:42,604 - INFO - values: , Parameter containing: +tensor([0., 0., 0., ..., 0., 0., 0.], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,604 - INFO - shape: , torch.Size([64]) +2025-06-25 10:38:42,607 - INFO - values: , Parameter containing: +tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], device='cuda:0', + requires_grad=True) +2025-06-25 10:38:42,607 - INFO - shape: , torch.Size([64]) +2025-06-25 10:38:42,607 - INFO - values: , Parameter containing: +tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,607 - INFO - shape: , torch.Size([256]) +2025-06-25 10:38:42,617 - INFO - values: , Parameter containing: +tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1.], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,617 - INFO - shape: , torch.Size([256]) +2025-06-25 10:38:42,617 - INFO - values: , Parameter containing: +tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,618 - INFO - shape: , torch.Size([256]) +2025-06-25 10:38:42,628 - INFO - values: , Parameter containing: +tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1.], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,628 - INFO - shape: , torch.Size([256]) +2025-06-25 10:38:42,629 - INFO - values: , Parameter containing: +tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + device='cuda:0', requires_grad=True) 
+2025-06-25 10:38:42,629 - INFO - shape: , torch.Size([128]) +2025-06-25 10:38:42,634 - INFO - values: , Parameter containing: +tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1.], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,634 - INFO - shape: , torch.Size([128]) +2025-06-25 10:38:42,634 - INFO - values: , Parameter containing: +tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0.], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,635 - INFO - shape: , torch.Size([64, 6, 1, 1]) +2025-06-25 10:38:42,651 - INFO - values: , Parameter containing: +tensor([[[[-2.3897e-02]], + + [[-2.6691e-01]], + + [[ 2.6319e-01]], + + [[ 3.0958e-01]], + + [[ 3.7575e-01]], + + [[-3.9579e-02]]], + + + [[[ 1.9809e-01]], + + [[-1.4374e-02]], + + [[-3.1675e-02]], + + [[-1.5718e-01]], + + [[-1.2557e-01]], + + [[-3.9615e-05]]], + + + [[[-1.0764e-01]], + + [[ 7.1315e-02]], + + [[ 2.7186e-01]], + + [[-2.1235e-01]], + + [[ 2.9650e-03]], + + [[ 3.5170e-01]]], + + + [[[-3.4056e-01]], + + [[ 1.5578e-01]], + + [[ 6.2605e-02]], + + 
[[-1.3101e-01]], + + [[-1.8446e-01]], + + [[ 3.1537e-01]]], + + + [[[ 3.7144e-01]], + + [[ 8.5806e-02]], + + [[-2.6967e-01]], + + [[ 8.7051e-02]], + + [[ 1.0699e-01]], + + [[-5.0443e-02]]], + + + [[[ 7.8117e-02]], + + [[ 1.9789e-01]], + + [[-3.5183e-01]], + + [[-1.0784e-01]], + + [[-1.3014e-01]], + + [[ 1.4937e-01]]], + + + [[[ 2.0766e-01]], + + [[ 1.9663e-01]], + + [[-3.6546e-02]], + + [[-3.3927e-01]], + + [[-1.0464e-01]], + + [[-2.6392e-01]]], + + + [[[ 2.0106e-01]], + + [[-1.6703e-01]], + + [[ 4.8410e-02]], + + [[-1.2239e-01]], + + [[ 2.1513e-01]], + + [[ 2.0170e-01]]], + + + [[[ 1.5108e-01]], + + [[ 3.4691e-01]], + + [[ 3.1370e-01]], + + [[ 4.0201e-01]], + + [[ 3.3610e-01]], + + [[ 1.2566e-01]]], + + + [[[ 5.2043e-02]], + + [[-2.4462e-01]], + + [[-1.9471e-01]], + + [[-2.9193e-01]], + + [[ 1.6357e-01]], + + [[ 2.1119e-01]]], + + + [[[-2.0600e-01]], + + [[-3.9069e-01]], + + [[-1.0320e-01]], + + [[-2.2157e-01]], + + [[-2.9827e-01]], + + [[-8.9723e-02]]], + + + [[[-1.6268e-01]], + + [[ 1.7052e-01]], + + [[-8.9852e-02]], + + [[ 1.6352e-01]], + + [[-3.6650e-01]], + + [[ 8.8876e-02]]], + + + [[[-1.0254e-01]], + + [[ 3.9242e-01]], + + [[ 1.3591e-01]], + + [[-1.0763e-01]], + + [[-2.5106e-01]], + + [[-3.3839e-01]]], + + + [[[ 2.4748e-01]], + + [[ 4.0582e-02]], + + [[ 3.6023e-01]], + + [[-1.0688e-01]], + + [[ 3.4530e-01]], + + [[-2.9733e-01]]], + + + [[[-2.3852e-01]], + + [[-1.7447e-01]], + + [[ 6.4518e-02]], + + [[-1.3465e-02]], + + [[ 1.7623e-01]], + + [[ 3.8281e-01]]], + + + [[[-1.7709e-01]], + + [[-1.1235e-01]], + + [[-3.3109e-01]], + + [[-3.7451e-02]], + + [[ 2.1286e-01]], + + [[-3.2901e-01]]], + + + [[[ 3.3842e-01]], + + [[-9.2302e-02]], + + [[ 1.9910e-01]], + + [[-2.4930e-01]], + + [[-2.0211e-01]], + + [[-9.0722e-02]]], + + + [[[ 2.3433e-01]], + + [[ 2.7550e-01]], + + [[ 2.2412e-01]], + + [[ 2.6615e-01]], + + [[ 6.8556e-02]], + + [[-2.3972e-01]]], + + + [[[ 3.9010e-01]], + + [[ 3.4596e-01]], + + [[-2.0139e-01]], + + [[-2.9431e-01]], + + [[-2.9951e-01]], + + [[ 
3.8906e-01]]], + + + [[[ 4.0014e-03]], + + [[ 7.4538e-04]], + + [[-3.1093e-01]], + + [[ 2.9039e-01]], + + [[-2.4449e-01]], + + [[-3.8507e-01]]], + + + [[[ 3.1466e-01]], + + [[ 3.2916e-01]], + + [[-2.0693e-01]], + + [[ 1.0920e-01]], + + [[ 3.3846e-01]], + + [[ 4.6913e-02]]], + + + [[[-8.2591e-02]], + + [[ 3.2818e-01]], + + [[-2.0800e-02]], + + [[ 1.5455e-01]], + + [[ 3.3976e-01]], + + [[-3.0195e-01]]], + + + [[[ 9.6929e-02]], + + [[-8.1165e-02]], + + [[ 3.7005e-01]], + + [[ 8.0938e-02]], + + [[-1.8604e-01]], + + [[ 6.9150e-02]]], + + + [[[-1.4221e-01]], + + [[-5.1671e-02]], + + [[ 1.5591e-01]], + + [[-3.8185e-01]], + + [[-2.7183e-01]], + + [[-1.0323e-01]]], + + + [[[-1.2691e-01]], + + [[-2.5710e-01]], + + [[ 2.9997e-01]], + + [[ 1.2104e-01]], + + [[ 3.8371e-01]], + + [[-6.2674e-02]]], + + + [[[ 3.4903e-02]], + + [[-3.7368e-02]], + + [[-2.0989e-01]], + + [[ 3.7366e-02]], + + [[ 2.0184e-01]], + + [[-2.3241e-01]]], + + + [[[ 3.1655e-01]], + + [[ 4.3376e-02]], + + [[-2.2861e-02]], + + [[ 2.5815e-01]], + + [[-7.1605e-02]], + + [[ 3.6285e-01]]], + + + [[[ 5.0046e-02]], + + [[ 3.1143e-01]], + + [[-2.1707e-01]], + + [[ 2.8023e-01]], + + [[ 3.6164e-01]], + + [[ 2.0079e-01]]], + + + [[[-2.5034e-01]], + + [[ 1.7979e-02]], + + [[-3.7212e-01]], + + [[ 2.8625e-01]], + + [[ 1.2489e-01]], + + [[-2.8700e-01]]], + + + [[[-2.6047e-01]], + + [[-2.1583e-01]], + + [[ 3.3932e-01]], + + [[ 3.7298e-01]], + + [[-3.8783e-01]], + + [[ 2.4400e-01]]], + + + [[[ 8.3056e-02]], + + [[-1.7710e-01]], + + [[ 2.7769e-01]], + + [[-7.8093e-03]], + + [[-3.8622e-01]], + + [[ 1.4350e-01]]], + + + [[[ 1.7717e-01]], + + [[-2.8561e-01]], + + [[-5.9110e-02]], + + [[ 1.1660e-01]], + + [[ 1.7459e-01]], + + [[ 3.9873e-01]]], + + + [[[-2.6478e-01]], + + [[-2.3541e-01]], + + [[ 3.4933e-01]], + + [[ 1.2196e-02]], + + [[ 3.2850e-01]], + + [[ 3.1418e-01]]], + + + [[[ 2.6525e-02]], + + [[ 2.0564e-01]], + + [[ 3.1708e-01]], + + [[-1.1602e-01]], + + [[-5.3004e-02]], + + [[ 3.4775e-01]]], + + + [[[-2.0917e-01]], + + 
[[-2.3996e-01]], + + [[ 1.9946e-01]], + + [[-6.8574e-02]], + + [[-8.3004e-02]], + + [[-2.6785e-01]]], + + + [[[ 2.4027e-02]], + + [[ 2.1725e-01]], + + [[ 2.0970e-01]], + + [[ 1.9579e-01]], + + [[-2.9326e-01]], + + [[-3.6482e-01]]], + + + [[[ 2.4783e-01]], + + [[-3.3369e-01]], + + [[ 3.3089e-01]], + + [[ 2.9333e-01]], + + [[ 2.4027e-01]], + + [[ 1.6839e-01]]], + + + [[[ 6.2790e-02]], + + [[-1.5485e-02]], + + [[ 1.5180e-02]], + + [[-3.4462e-01]], + + [[-3.5259e-01]], + + [[ 1.7493e-01]]], + + + [[[ 4.3916e-02]], + + [[-2.7516e-01]], + + [[-3.7192e-01]], + + [[ 1.2046e-01]], + + [[-2.2444e-01]], + + [[-2.3717e-02]]], + + + [[[ 3.3488e-01]], + + [[ 7.3388e-02]], + + [[-2.6783e-01]], + + [[ 1.3627e-01]], + + [[ 9.4766e-03]], + + [[-3.0235e-01]]], + + + [[[-3.5475e-01]], + + [[ 2.2280e-01]], + + [[-1.8068e-01]], + + [[ 3.4876e-01]], + + [[-2.0582e-01]], + + [[-8.0397e-02]]], + + + [[[-5.4861e-02]], + + [[ 1.6743e-01]], + + [[ 7.5067e-02]], + + [[-3.7207e-01]], + + [[ 3.7336e-01]], + + [[-9.3957e-02]]], + + + [[[ 1.0395e-01]], + + [[-1.5793e-01]], + + [[ 3.1547e-01]], + + [[-4.2108e-02]], + + [[ 1.2027e-02]], + + [[ 1.1006e-01]]], + + + [[[-2.5192e-01]], + + [[ 3.6272e-01]], + + [[ 1.7334e-01]], + + [[-5.3914e-02]], + + [[ 3.7598e-01]], + + [[-2.5667e-01]]], + + + [[[ 2.3943e-02]], + + [[-2.2848e-01]], + + [[-1.1305e-01]], + + [[-3.4343e-01]], + + [[-4.6663e-02]], + + [[-3.4263e-01]]], + + + [[[-3.3581e-01]], + + [[ 3.8743e-01]], + + [[-1.2060e-01]], + + [[ 1.7991e-02]], + + [[ 2.3054e-01]], + + [[ 2.4792e-01]]], + + + [[[ 2.7606e-02]], + + [[ 2.7995e-01]], + + [[ 2.1753e-01]], + + [[-2.0385e-01]], + + [[-2.5343e-01]], + + [[ 4.0808e-01]]], + + + [[[ 3.9389e-01]], + + [[ 2.7841e-01]], + + [[-1.7579e-01]], + + [[-3.9785e-01]], + + [[ 2.0461e-01]], + + [[-1.9326e-01]]], + + + [[[-2.6450e-01]], + + [[ 2.8254e-01]], + + [[-2.7032e-01]], + + [[-9.5179e-02]], + + [[-7.1349e-03]], + + [[ 2.1132e-01]]], + + + [[[ 3.1241e-01]], + + [[ 4.0481e-02]], + + [[ 5.6559e-02]], + + [[ 
1.3851e-02]], + + [[ 1.1459e-01]], + + [[ 2.5119e-01]]], + + + [[[-3.6372e-01]], + + [[-6.9620e-02]], + + [[ 2.0942e-01]], + + [[ 8.0481e-02]], + + [[-2.5990e-01]], + + [[ 2.7889e-01]]], + + + [[[-3.0713e-01]], + + [[-3.6285e-01]], + + [[ 3.8107e-02]], + + [[ 2.1847e-01]], + + [[ 1.8798e-01]], + + [[-7.0681e-03]]], + + + [[[-2.9493e-01]], + + [[-2.4616e-01]], + + [[-2.4293e-01]], + + [[ 2.4175e-01]], + + [[ 3.0435e-01]], + + [[ 3.2484e-01]]], + + + [[[ 3.1054e-01]], + + [[ 3.6393e-01]], + + [[ 1.5452e-01]], + + [[ 2.2048e-01]], + + [[ 3.6895e-01]], + + [[ 3.2268e-02]]], + + + [[[-2.2169e-01]], + + [[ 3.8471e-01]], + + [[ 5.6197e-02]], + + [[ 2.0913e-01]], + + [[ 3.4629e-01]], + + [[ 1.8342e-01]]], + + + [[[-5.1384e-02]], + + [[-1.5985e-01]], + + [[-7.4092e-03]], + + [[ 1.1119e-01]], + + [[ 3.3349e-01]], + + [[-2.5655e-01]]], + + + [[[-3.3514e-01]], + + [[ 3.9779e-01]], + + [[-1.8797e-01]], + + [[-2.5098e-01]], + + [[ 1.0416e-01]], + + [[ 3.1438e-01]]], + + + [[[ 2.9934e-01]], + + [[ 7.4722e-02]], + + [[-2.3987e-01]], + + [[ 3.6604e-02]], + + [[ 3.1932e-01]], + + [[ 1.6386e-01]]], + + + [[[-2.5528e-01]], + + [[-1.3581e-01]], + + [[-3.7130e-01]], + + [[ 1.1091e-01]], + + [[ 8.7793e-03]], + + [[-2.4222e-01]]], + + + [[[-2.1799e-01]], + + [[ 1.1598e-01]], + + [[ 3.0744e-01]], + + [[-1.3358e-01]], + + [[ 3.3512e-02]], + + [[-2.4386e-01]]], + + + [[[-2.6588e-01]], + + [[-2.8783e-01]], + + [[ 3.0375e-01]], + + [[ 1.6312e-02]], + + [[ 4.0121e-01]], + + [[-4.3093e-02]]], + + + [[[-2.2502e-01]], + + [[-2.4819e-01]], + + [[-1.5412e-02]], + + [[ 1.3704e-01]], + + [[-3.7049e-01]], + + [[ 2.8949e-01]]], + + + [[[-3.6665e-01]], + + [[ 3.7638e-01]], + + [[ 2.3239e-01]], + + [[ 1.7782e-01]], + + [[ 9.6222e-02]], + + [[-2.7082e-01]]], + + + [[[ 4.5406e-02]], + + [[ 1.6287e-01]], + + [[ 3.7475e-01]], + + [[-1.2406e-01]], + + [[ 2.5560e-01]], + + [[ 3.4433e-01]]]], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,651 - INFO - shape: , torch.Size([64, 64, 1, 1]) +2025-06-25 
10:38:42,654 - INFO - values: , Parameter containing: +tensor([[[[-0.0994]], + + [[-0.0949]], + + [[ 0.0298]], + + ..., + + [[-0.0310]], + + [[ 0.1048]], + + [[ 0.0933]]], + + + [[[ 0.0449]], + + [[-0.0706]], + + [[-0.1218]], + + ..., + + [[ 0.1152]], + + [[-0.0056]], + + [[ 0.0720]]], + + + [[[ 0.0797]], + + [[-0.1226]], + + [[-0.0525]], + + ..., + + [[-0.0373]], + + [[ 0.0948]], + + [[-0.0960]]], + + + ..., + + + [[[-0.0069]], + + [[ 0.1041]], + + [[ 0.0592]], + + ..., + + [[ 0.0395]], + + [[ 0.0608]], + + [[-0.0543]]], + + + [[[ 0.0662]], + + [[ 0.0101]], + + [[ 0.0928]], + + ..., + + [[ 0.0875]], + + [[ 0.1216]], + + [[ 0.0746]]], + + + [[[ 0.0277]], + + [[-0.0398]], + + [[ 0.0075]], + + ..., + + [[-0.1092]], + + [[ 0.0873]], + + [[-0.0638]]]], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,654 - INFO - shape: , torch.Size([64, 128, 1, 1]) +2025-06-25 10:38:42,658 - INFO - values: , Parameter containing: +tensor([[[[-0.0457]], + + [[ 0.0069]], + + [[-0.0066]], + + ..., + + [[ 0.0123]], + + [[-0.0610]], + + [[ 0.0790]]], + + + [[[-0.0127]], + + [[-0.0824]], + + [[-0.0305]], + + ..., + + [[ 0.0324]], + + [[-0.0152]], + + [[ 0.0456]]], + + + [[[-0.0714]], + + [[ 0.0153]], + + [[-0.0155]], + + ..., + + [[-0.0803]], + + [[-0.0663]], + + [[-0.0741]]], + + + ..., + + + [[[ 0.0574]], + + [[ 0.0446]], + + [[ 0.0500]], + + ..., + + [[ 0.0878]], + + [[ 0.0301]], + + [[-0.0837]]], + + + [[[-0.0643]], + + [[-0.0268]], + + [[ 0.0362]], + + ..., + + [[-0.0627]], + + [[-0.0429]], + + [[-0.0046]]], + + + [[[ 0.0066]], + + [[ 0.0852]], + + [[-0.0869]], + + ..., + + [[-0.0464]], + + [[ 0.0516]], + + [[ 0.0848]]]], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,658 - INFO - shape: , torch.Size([64, 64, 1, 1]) +2025-06-25 10:38:42,661 - INFO - values: , Parameter containing: +tensor([[[[ 0.1086]], + + [[ 0.0671]], + + [[-0.0518]], + + ..., + + [[ 0.0604]], + + [[ 0.0906]], + + [[-0.1109]]], + + + [[[ 0.0940]], + + [[-0.1075]], + + [[-0.0139]], + + ..., + + [[ 
0.1097]], + + [[-0.0161]], + + [[ 0.0208]]], + + + [[[ 0.0571]], + + [[-0.0632]], + + [[-0.0797]], + + ..., + + [[-0.0529]], + + [[-0.0730]], + + [[ 0.0834]]], + + + ..., + + + [[[-0.0405]], + + [[ 0.0479]], + + [[-0.1224]], + + ..., + + [[-0.1102]], + + [[ 0.0326]], + + [[ 0.1133]]], + + + [[[-0.0581]], + + [[ 0.0892]], + + [[-0.0993]], + + ..., + + [[-0.1188]], + + [[ 0.0627]], + + [[ 0.0338]]], + + + [[[ 0.1140]], + + [[ 0.0285]], + + [[ 0.0330]], + + ..., + + [[ 0.0500]], + + [[-0.0302]], + + [[ 0.0686]]]], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,661 - INFO - shape: , torch.Size([64, 128, 1, 1]) +2025-06-25 10:38:42,663 - INFO - values: , Parameter containing: +tensor([[[[ 0.0220]], + + [[ 0.0619]], + + [[-0.0067]], + + ..., + + [[-0.0331]], + + [[-0.0562]], + + [[ 0.0365]]], + + + [[[-0.0301]], + + [[-0.0823]], + + [[-0.0733]], + + ..., + + [[ 0.0106]], + + [[ 0.0752]], + + [[-0.0727]]], + + + [[[ 0.0596]], + + [[-0.0545]], + + [[ 0.0096]], + + ..., + + [[-0.0862]], + + [[ 0.0386]], + + [[-0.0814]]], + + + ..., + + + [[[ 0.0266]], + + [[-0.0156]], + + [[-0.0359]], + + ..., + + [[ 0.0854]], + + [[ 0.0834]], + + [[-0.0253]]], + + + [[[-0.0778]], + + [[ 0.0859]], + + [[-0.0323]], + + ..., + + [[ 0.0865]], + + [[-0.0414]], + + [[-0.0454]]], + + + [[[ 0.0695]], + + [[-0.0433]], + + [[-0.0714]], + + ..., + + [[ 0.0545]], + + [[ 0.0749]], + + [[-0.0181]]]], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,663 - INFO - shape: , torch.Size([1024, 192, 1]) +2025-06-25 10:38:42,665 - INFO - values: , Parameter containing: +tensor([[[ 0.0003], + [ 0.0135], + [-0.0149], + ..., + [-0.0082], + [ 0.0508], + [-0.0172]], + + [[ 0.0194], + [ 0.0282], + [-0.0225], + ..., + [-0.0385], + [ 0.0104], + [ 0.0414]], + + [[ 0.0130], + [-0.0249], + [-0.0551], + ..., + [-0.0126], + [-0.0489], + [-0.0162]], + + ..., + + [[ 0.0319], + [ 0.0665], + [ 0.0591], + ..., + [-0.0094], + [-0.0433], + [ 0.0534]], + + [[ 0.0150], + [ 0.0439], + [-0.0030], + ..., + [ 
0.0033], + [ 0.0324], + [ 0.0096]], + + [[-0.0555], + [-0.0511], + [ 0.0314], + ..., + [-0.0042], + [-0.0425], + [ 0.0536]]], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,666 - INFO - shape: , torch.Size([64, 16, 1]) +2025-06-25 10:38:42,668 - INFO - values: , Parameter containing: +tensor([[[ 0.1007], + [-0.0574], + [-0.0414], + ..., + [-0.0379], + [ 0.2423], + [ 0.1333]], + + [[ 0.1576], + [ 0.0932], + [-0.0702], + ..., + [ 0.1092], + [-0.0735], + [ 0.1162]], + + [[-0.0314], + [-0.0033], + [ 0.0704], + ..., + [ 0.0184], + [ 0.0483], + [-0.2117]], + + ..., + + [[ 0.0765], + [-0.2088], + [-0.0608], + ..., + [-0.0596], + [-0.2131], + [-0.0600]], + + [[-0.0705], + [-0.0434], + [-0.1741], + ..., + [ 0.1960], + [ 0.1586], + [ 0.2274]], + + [[-0.1057], + [ 0.0211], + [-0.1496], + ..., + [ 0.1945], + [ 0.2066], + [ 0.2240]]], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,668 - INFO - shape: , torch.Size([256, 1216, 1]) +2025-06-25 10:38:42,670 - INFO - values: , Parameter containing: +tensor([[[ 0.0260], + [ 0.0159], + [-0.0236], + ..., + [-0.0123], + [-0.0285], + [-0.0244]], + + [[-0.0112], + [-0.0204], + [-0.0180], + ..., + [ 0.0196], + [-0.0228], + [ 0.0198]], + + [[-0.0148], + [-0.0054], + [-0.0080], + ..., + [-0.0252], + [-0.0212], + [-0.0253]], + + ..., + + [[ 0.0133], + [-0.0180], + [ 0.0246], + ..., + [ 0.0271], + [-0.0063], + [ 0.0017]], + + [[ 0.0082], + [-0.0200], + [-0.0256], + ..., + [ 0.0261], + [ 0.0082], + [-0.0039]], + + [[-0.0236], + [ 0.0136], + [ 0.0178], + ..., + [ 0.0052], + [-0.0040], + [-0.0030]]], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,670 - INFO - shape: , torch.Size([256, 256, 1]) +2025-06-25 10:38:42,672 - INFO - values: , Parameter containing: +tensor([[[ 0.0408], + [ 0.0296], + [ 0.0549], + ..., + [ 0.0092], + [ 0.0017], + [ 0.0536]], + + [[ 0.0324], + [-0.0265], + [ 0.0373], + ..., + [-0.0258], + [ 0.0152], + [ 0.0304]], + + [[ 0.0409], + [-0.0599], + [ 0.0131], + ..., + [ 0.0267], + [ 0.0087], + 
[ 0.0361]], + + ..., + + [[-0.0104], + [ 0.0095], + [ 0.0621], + ..., + [ 0.0463], + [-0.0310], + [-0.0573]], + + [[-0.0518], + [ 0.0009], + [-0.0042], + ..., + [ 0.0049], + [ 0.0304], + [ 0.0583]], + + [[-0.0026], + [ 0.0406], + [-0.0219], + ..., + [ 0.0615], + [-0.0095], + [ 0.0415]]], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,672 - INFO - shape: , torch.Size([128, 256, 1]) +2025-06-25 10:38:42,674 - INFO - values: , Parameter containing: +tensor([[[ 0.0256], + [ 0.0395], + [-0.0311], + ..., + [ 0.0432], + [ 0.0211], + [-0.0005]], + + [[ 0.0102], + [ 0.0392], + [-0.0098], + ..., + [-0.0545], + [-0.0143], + [-0.0322]], + + [[ 0.0042], + [ 0.0356], + [-0.0462], + ..., + [ 0.0441], + [-0.0501], + [-0.0058]], + + ..., + + [[-0.0602], + [-0.0575], + [-0.0398], + ..., + [ 0.0253], + [ 0.0204], + [ 0.0014]], + + [[-0.0341], + [-0.0396], + [-0.0335], + ..., + [-0.0424], + [ 0.0034], + [-0.0104]], + + [[ 0.0258], + [ 0.0233], + [ 0.0078], + ..., + [ 0.0463], + [-0.0465], + [ 0.0272]]], device='cuda:0', requires_grad=True) +2025-06-25 10:38:42,675 - INFO - shape: , torch.Size([1, 128, 1]) +2025-06-25 10:38:42,680 - INFO - values: , Parameter containing: +tensor([[[-0.0301], + [-0.0044], + [ 0.0655], + [ 0.0767], + [-0.0431], + [-0.0802], + [ 0.0069], + [ 0.0256], + [ 0.0109], + [-0.0405], + [ 0.0571], + [-0.0292], + [-0.0125], + [ 0.0075], + [ 0.0425], + [ 0.0851], + [-0.0748], + [ 0.0222], + [-0.0863], + [-0.0632], + [-0.0287], + [-0.0264], + [ 0.0135], + [ 0.0385], + [-0.0118], + [-0.0756], + [-0.0115], + [ 0.0001], + [-0.0819], + [ 0.0295], + [-0.0811], + [-0.0348], + [ 0.0079], + [ 0.0770], + [-0.0338], + [ 0.0609], + [-0.0508], + [ 0.0202], + [ 0.0512], + [ 0.0855], + [-0.0123], + [ 0.0610], + [-0.0716], + [-0.0008], + [ 0.0067], + [-0.0455], + [ 0.0282], + [-0.0082], + [-0.0080], + [-0.0778], + [ 0.0844], + [-0.0547], + [-0.0045], + [-0.0494], + [ 0.0404], + [ 0.0436], + [-0.0460], + [ 0.0286], + [-0.0112], + [ 0.0720], + [-0.0330], + [ 0.0325], + [ 
0.0628], + [-0.0325], + [-0.0705], + [ 0.0658], + [-0.0703], + [-0.0358], + [-0.0527], + [-0.0183], + [ 0.0128], + [-0.0154], + [-0.0877], + [-0.0098], + [-0.0734], + [ 0.0080], + [ 0.0199], + [ 0.0228], + [ 0.0703], + [ 0.0756], + [-0.0249], + [ 0.0822], + [-0.0334], + [-0.0785], + [-0.0511], + [ 0.0746], + [-0.0015], + [ 0.0832], + [ 0.0698], + [ 0.0636], + [-0.0155], + [-0.0503], + [ 0.0105], + [ 0.0238], + [ 0.0527], + [-0.0284], + [ 0.0041], + [ 0.0154], + [-0.0388], + [ 0.0444], + [ 0.0523], + [ 0.0201], + [-0.0384], + [ 0.0403], + [-0.0805], + [-0.0407], + [-0.0254], + [ 0.0554], + [-0.0435], + [ 0.0074], + [-0.0115], + [ 0.0855], + [-0.0026], + [ 0.0277], + [-0.0727], + [-0.0244], + [-0.0264], + [-0.0144], + [-0.0364], + [ 0.0139], + [ 0.0350], + [-0.0669], + [-0.0142], + [-0.0820], + [ 0.0096], + [ 0.0068], + [-0.0126], + [ 0.0386]]], device='cuda:0', requires_grad=True) +2025-06-25 15:15:27,084 - INFO - args.exp_name : Train_Test +2025-06-25 15:15:27,084 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=6, epochs=10, lr=0.001, num_workers=4, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-25 15:15:27,084 - INFO - Starting training with 1 GPUs +2025-06-25 15:15:31,343 - INFO - Total trainable parameters: 1437705 +2025-06-25 15:15:31,349 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. 
+2025-06-25 15:15:31,351 - INFO - Data loaded: 0 training batches, 0 validation batches, 0 test batches +2025-06-25 16:41:17,856 - INFO - args.exp_name : Train_Test +2025-06-25 16:41:17,858 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=6, epochs=10, lr=0.001, num_workers=4, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-25 16:41:17,858 - INFO - Starting training with 1 GPUs +2025-06-25 16:41:20,100 - INFO - Total trainable parameters: 1437705 +2025-06-25 16:41:20,107 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-25 16:41:20,109 - INFO - Data loaded: 0 training batches, 0 validation batches, 0 test batches +2025-06-25 16:47:35,076 - INFO - args.exp_name : Train_Test +2025-06-25 16:47:35,076 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=4, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-25 16:47:35,077 - INFO - Starting training with 1 GPUs +2025-06-25 16:47:37,397 - INFO - Total trainable parameters: 1437705 +2025-06-25 16:47:37,403 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. 
+2025-06-25 16:47:37,405 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-25 16:52:48,111 - INFO - args.exp_name : Train_Test +2025-06-25 16:52:48,112 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-25 16:52:48,112 - INFO - Starting training with 1 GPUs +2025-06-25 16:52:50,742 - INFO - Total trainable parameters: 1437705 +2025-06-25 16:52:50,750 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-25 16:52:54,543 - INFO - Batch 0 +2025-06-25 16:52:54,543 - INFO - Points Shape: torch.Size([2, 1, 3, 10000]) +2025-06-25 16:52:54,543 - INFO - Pressure shape: torch.Size([2, 1, 10000]) +2025-06-25 16:52:55,314 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-26 10:08:57,031 - INFO - args.exp_name : Train_Test +2025-06-26 10:08:57,032 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-26 10:08:57,032 - INFO - Starting training with 1 GPUs +2025-06-26 10:09:04,180 - INFO - Total trainable parameters: 1437705 
+2025-06-26 10:09:04,185 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-26 10:09:04,187 - INFO - Type of dataset: +2025-06-26 10:09:04,187 - INFO - Number of samples in full_dataset: 3 +2025-06-26 10:17:42,973 - INFO - args.exp_name : Train_Test +2025-06-26 10:17:42,974 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-26 10:17:42,974 - INFO - Starting training with 1 GPUs +2025-06-26 10:17:45,581 - INFO - Total trainable parameters: 1437705 +2025-06-26 10:17:45,587 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. 
+2025-06-26 10:17:45,588 - INFO - Type of dataset: +2025-06-26 10:17:45,588 - INFO - Number of samples in full_dataset: 3 +2025-06-26 10:17:45,588 - INFO - Number of samples in train_dataset: 1 +2025-06-26 10:17:45,588 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-26 10:23:10,944 - INFO - args.exp_name : Train_Test +2025-06-26 10:23:10,945 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-26 10:23:10,945 - INFO - Starting training with 1 GPUs +2025-06-26 10:23:13,287 - INFO - Total trainable parameters: 1437705 +2025-06-26 10:23:13,293 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. 
+2025-06-26 10:23:13,294 - INFO - Type of dataset: +2025-06-26 10:23:13,294 - INFO - Number of samples in full_dataset: 3 +2025-06-26 10:26:04,645 - INFO - args.exp_name : Train_Test +2025-06-26 10:26:04,645 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-26 10:26:04,645 - INFO - Starting training with 1 GPUs +2025-06-26 10:26:06,869 - INFO - Total trainable parameters: 1437705 +2025-06-26 10:26:06,874 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-26 10:26:06,875 - INFO - Type of dataset: +2025-06-26 10:26:06,876 - INFO - Number of samples in full_dataset: 3 +2025-06-26 10:26:06,876 - INFO - List all methods and attributs: ['__add__', '__annotations__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__getitems__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_is_protocol', 'dataset', 'indices'] +2025-06-26 10:26:06,876 - INFO - Number of samples in train_dataset: 1 +2025-06-26 10:26:06,876 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-26 10:29:01,617 - INFO - args.exp_name : Train_Test +2025-06-26 10:29:01,617 - INFO - Arguments: 
Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-26 10:29:01,618 - INFO - Starting training with 1 GPUs +2025-06-26 10:29:03,817 - INFO - Total trainable parameters: 1437705 +2025-06-26 10:29:03,822 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-26 10:29:03,824 - INFO - Type of dataset: +2025-06-26 10:29:03,824 - INFO - Type of train_dataloader: +2025-06-26 10:29:03,824 - INFO - Number of samples in full_dataset: 3 +2025-06-26 10:29:03,824 - INFO - List all methods and attributs: ['__add__', '__annotations__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__getitems__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_is_protocol', 'dataset', 'indices'] +2025-06-26 10:29:03,824 - INFO - Number of samples in train_dataset: 1 +2025-06-26 10:29:03,824 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-26 10:43:51,594 - INFO - args.exp_name : Train_Test +2025-06-26 10:43:51,595 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', 
subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-26 10:43:51,595 - INFO - Starting training with 1 GPUs +2025-06-26 10:43:54,222 - INFO - Total trainable parameters: 1437705 +2025-06-26 10:43:54,227 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-26 10:43:54,229 - INFO - Type of dataset: +2025-06-26 10:43:54,229 - INFO - Type of train_dataloader: +2025-06-26 10:43:54,229 - INFO - Number of samples in full_dataset: 5 +2025-06-26 10:43:54,229 - INFO - List all methods and attributs: ['__add__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_get_cache_path', '_is_protocol', '_load_from_cache', '_save_to_cache', 'cache_dir', 'num_points', 'preprocess', 'root_dir', 'sample_point_cloud_with_pressure', 'vtk_files'] +2025-06-26 10:43:54,229 - INFO - Number of samples in train_dataset: 1 +2025-06-26 10:43:54,229 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-26 10:56:27,885 - INFO - args.exp_name : Train_Test +2025-06-26 10:56:27,886 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', 
subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-26 10:56:27,886 - INFO - Starting training with 1 GPUs +2025-06-26 10:56:30,290 - INFO - Total trainable parameters: 1437705 +2025-06-26 10:56:30,295 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-26 10:56:30,297 - INFO - Type of dataset: +2025-06-26 10:56:30,297 - INFO - Type of train_dataloader: +2025-06-26 10:56:30,297 - INFO - Number of samples in full_dataset: 5 +2025-06-26 10:56:30,297 - INFO - List all methods and attributs: ['__add__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_get_cache_path', '_is_protocol', '_load_from_cache', '_save_to_cache', 'cache_dir', 'num_points', 'preprocess', 'root_dir', 'sample_point_cloud_with_pressure', 'vtk_files'] +2025-06-26 10:56:30,297 - INFO - Root Dir: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK +2025-06-26 10:56:30,297 - INFO - Number of .vtk files: 5 +2025-06-26 11:06:34,241 - INFO - args.exp_name : Train_Test +2025-06-26 11:06:34,242 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', 
subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-26 11:06:34,242 - INFO - Starting training with 1 GPUs +2025-06-26 11:06:36,686 - INFO - Total trainable parameters: 1437705 +2025-06-26 11:06:36,691 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-26 11:06:36,694 - INFO - Type of dataset: +2025-06-26 11:06:36,694 - INFO - Number of samples in full_dataset: 1 +2025-06-26 11:06:36,694 - INFO - List all methods and attributs: ['_DataLoader__initialized', '_DataLoader__multiprocessing_context', '_IterableDataset_len_called', '__annotations__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_auto_collation', '_dataset_kind', '_get_iterator', '_index_sampler', '_is_protocol', '_iterator', 'batch_sampler', 'batch_size', 'check_worker_number_rationality', 'collate_fn', 'dataset', 'drop_last', 'generator', 'multiprocessing_context', 'num_workers', 'persistent_workers', 'pin_memory', 'pin_memory_device', 'prefetch_factor', 'sampler', 'timeout', 'worker_init_fn'] +2025-06-26 11:06:36,694 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-26 11:07:56,846 - INFO - args.exp_name : Train_Test +2025-06-26 11:07:56,847 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, 
dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-26 11:07:56,847 - INFO - Starting training with 1 GPUs +2025-06-26 11:07:59,164 - INFO - Total trainable parameters: 1437705 +2025-06-26 11:07:59,169 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-26 11:07:59,171 - INFO - Type of dataset: +2025-06-26 11:07:59,171 - INFO - Number of samples in full_dataset: 1 +2025-06-26 11:07:59,171 - INFO - List all methods and attributs: ['_DataLoader__initialized', '_DataLoader__multiprocessing_context', '_IterableDataset_len_called', '__annotations__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_auto_collation', '_dataset_kind', '_get_iterator', '_index_sampler', '_is_protocol', '_iterator', 'batch_sampler', 'batch_size', 'check_worker_number_rationality', 'collate_fn', 'dataset', 'drop_last', 'generator', 'multiprocessing_context', 'num_workers', 'persistent_workers', 'pin_memory', 'pin_memory_device', 'prefetch_factor', 'sampler', 'timeout', 'worker_init_fn'] +2025-06-26 11:07:59,171 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-26 11:08:21,969 - INFO - args.exp_name 
: Train_Test +2025-06-26 11:08:21,970 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-26 11:08:21,970 - INFO - Starting training with 1 GPUs +2025-06-26 11:08:24,248 - INFO - Total trainable parameters: 1437705 +2025-06-26 11:08:24,253 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-26 11:08:24,254 - INFO - Type of dataset: +2025-06-26 11:08:24,254 - INFO - Number of samples in full_dataset: 1 +2025-06-26 11:08:24,255 - INFO - List all methods and attributs: ['_DataLoader__initialized', '_DataLoader__multiprocessing_context', '_IterableDataset_len_called', '__annotations__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_auto_collation', '_dataset_kind', '_get_iterator', '_index_sampler', '_is_protocol', '_iterator', 'batch_sampler', 'batch_size', 'check_worker_number_rationality', 'collate_fn', 'dataset', 'drop_last', 'generator', 'multiprocessing_context', 'num_workers', 'persistent_workers', 'pin_memory', 'pin_memory_device', 'prefetch_factor', 'sampler', 'timeout', 'worker_init_fn'] +2025-06-26 11:08:24,255 - INFO - Data loaded: 1 
training batches, 0 validation batches, 1 test batches +2025-06-26 11:20:04,681 - INFO - args.exp_name : Train_Test +2025-06-26 11:20:04,682 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-26 11:20:04,682 - INFO - Starting training with 1 GPUs +2025-06-26 11:20:07,150 - INFO - Total trainable parameters: 1437705 +2025-06-26 11:20:07,155 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-26 11:20:07,157 - INFO - Type of dataset: +2025-06-26 11:20:07,157 - INFO - Number of samples in full_dataset: 1 +2025-06-26 11:20:07,157 - INFO - List all methods and attributs: ['_DataLoader__initialized', '_DataLoader__multiprocessing_context', '_IterableDataset_len_called', '__annotations__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_auto_collation', '_dataset_kind', '_get_iterator', '_index_sampler', '_is_protocol', '_iterator', 'batch_sampler', 'batch_size', 'check_worker_number_rationality', 'collate_fn', 'dataset', 'drop_last', 'generator', 'multiprocessing_context', 'num_workers', 'persistent_workers', 'pin_memory', 'pin_memory_device', 
'prefetch_factor', 'sampler', 'timeout', 'worker_init_fn'] +2025-06-26 11:20:10,940 - INFO - Batch: tensor([[[[-3.1358e-03, 3.7210e+00, 3.7106e+00, ..., -5.3803e-01, + 1.7617e+00, 1.4177e+00], + [ 5.4775e-01, -2.0625e-01, -6.1869e-01, ..., 3.5577e-01, + -7.1609e-01, -8.8304e-01], + [ 6.8388e-01, 3.4406e-01, 8.5621e-01, ..., 7.4636e-01, + 1.0855e+00, 4.9376e-01]]], + + + [[[ 2.0024e+00, 2.6824e+00, -5.8603e-01, ..., 5.5839e-01, + 2.7609e+00, 3.2322e+00], + [-6.1381e-01, 6.7716e-01, 8.0544e-01, ..., -7.6071e-01, + 8.1247e-01, 8.5958e-01], + [ 1.2874e+00, 3.8066e-01, 4.5000e-01, ..., 8.8547e-01, + 6.8198e-01, 5.7108e-01]]]]) +2025-06-26 11:25:24,191 - INFO - args.exp_name : Train_Test +2025-06-26 11:25:24,191 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-26 11:25:24,192 - INFO - Starting training with 1 GPUs +2025-06-26 11:25:26,538 - INFO - Total trainable parameters: 1437705 +2025-06-26 11:25:26,543 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. 
+2025-06-26 11:25:26,545 - INFO - Type of dataset: +2025-06-26 11:25:26,545 - INFO - Number of samples in full_dataset: 1 +2025-06-26 11:25:26,545 - INFO - List all methods and attributs: ['_DataLoader__initialized', '_DataLoader__multiprocessing_context', '_IterableDataset_len_called', '__annotations__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_auto_collation', '_dataset_kind', '_get_iterator', '_index_sampler', '_is_protocol', '_iterator', 'batch_sampler', 'batch_size', 'check_worker_number_rationality', 'collate_fn', 'dataset', 'drop_last', 'generator', 'multiprocessing_context', 'num_workers', 'persistent_workers', 'pin_memory', 'pin_memory_device', 'prefetch_factor', 'sampler', 'timeout', 'worker_init_fn'] +2025-06-26 11:25:30,308 - INFO - Batch: 0 +2025-06-26 11:25:30,312 - INFO - Batch.points: tensor([[[[-3.1358e-03, 3.7210e+00, 3.7106e+00, ..., -5.3803e-01, + 1.7617e+00, 1.4177e+00], + [ 5.4775e-01, -2.0625e-01, -6.1869e-01, ..., 3.5577e-01, + -7.1609e-01, -8.8304e-01], + [ 6.8388e-01, 3.4406e-01, 8.5621e-01, ..., 7.4636e-01, + 1.0855e+00, 4.9376e-01]]], + + + [[[ 2.0024e+00, 2.6824e+00, -5.8603e-01, ..., 5.5839e-01, + 2.7609e+00, 3.2322e+00], + [-6.1381e-01, 6.7716e-01, 8.0544e-01, ..., -7.6071e-01, + 8.1247e-01, 8.5958e-01], + [ 1.2874e+00, 3.8066e-01, 4.5000e-01, ..., 8.8547e-01, + 6.8198e-01, 5.7108e-01]]]]) +2025-06-26 11:25:30,312 - INFO - Batch.Pressure: tensor([[[-149.8820, -47.8371, -30.8754, ..., -213.2100, -296.0330, + -58.6534]], + + [[ -88.2562, -86.5040, -322.1380, ..., -8.5429, -83.1721, + -153.7220]]]) +2025-06-26 11:25:30,780 - INFO - Data loaded: 1 
training batches, 0 validation batches, 1 test batches +2025-06-26 17:04:28,780 - INFO - args.exp_name : Train_Test +2025-06-26 17:04:28,780 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-26 17:04:28,780 - INFO - Starting training with 1 GPUs +2025-06-26 17:04:35,373 - INFO - Total trainable parameters: 1437705 +2025-06-26 17:04:35,378 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-26 17:04:35,380 - INFO - Type of train_dataloader: +2025-06-26 17:04:35,380 - INFO - Number of train_dataloader: 1 +2025-06-26 17:04:35,380 - INFO - List all methods and attributs of Dataloader: ['_DataLoader__initialized', '_DataLoader__multiprocessing_context', '_IterableDataset_len_called', '__annotations__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_auto_collation', '_dataset_kind', '_get_iterator', '_index_sampler', '_is_protocol', '_iterator', 'batch_sampler', 'batch_size', 'check_worker_number_rationality', 'collate_fn', 'dataset', 'drop_last', 'generator', 'multiprocessing_context', 'num_workers', 'persistent_workers', 'pin_memory', 
'pin_memory_device', 'prefetch_factor', 'sampler', 'timeout', 'worker_init_fn'] +2025-06-26 17:04:39,574 - INFO - Batch: 0 +2025-06-26 17:04:39,578 - INFO - Batch.points: tensor([[[[-3.1358e-03, 3.7210e+00, 3.7106e+00, ..., -5.3803e-01, + 1.7617e+00, 1.4177e+00], + [ 5.4775e-01, -2.0625e-01, -6.1869e-01, ..., 3.5577e-01, + -7.1609e-01, -8.8304e-01], + [ 6.8388e-01, 3.4406e-01, 8.5621e-01, ..., 7.4636e-01, + 1.0855e+00, 4.9376e-01]]], + + + [[[ 2.0024e+00, 2.6824e+00, -5.8603e-01, ..., 5.5839e-01, + 2.7609e+00, 3.2322e+00], + [-6.1381e-01, 6.7716e-01, 8.0544e-01, ..., -7.6071e-01, + 8.1247e-01, 8.5958e-01], + [ 1.2874e+00, 3.8066e-01, 4.5000e-01, ..., 8.8547e-01, + 6.8198e-01, 5.7108e-01]]]]) +2025-06-26 17:04:39,579 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-26 17:04:39,579 - INFO - Batch.Pressure: tensor([[[-149.8820, -47.8371, -30.8754, ..., -213.2100, -296.0330, + -58.6534]], + + [[ -88.2562, -86.5040, -322.1380, ..., -8.5429, -83.1721, + -153.7220]]]) +2025-06-26 17:04:39,579 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-26 17:04:40,349 - INFO - Type of dataset: +2025-06-26 17:04:40,353 - INFO - Number of samples of subset : 3 +2025-06-26 17:04:40,353 - INFO - Subset indices: [1, 2, 4] +2025-06-26 17:04:40,353 - INFO - List all methods and attributs of subset: ['__add__', '__annotations__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__getitems__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slotnames__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_is_protocol', 'dataset', 'indices']) +2025-06-26 17:04:40,353 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-26 
17:08:47,931 - INFO - args.exp_name : Train_Test +2025-06-26 17:08:47,932 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-26 17:08:47,932 - INFO - Starting training with 1 GPUs +2025-06-26 17:08:52,223 - INFO - Total trainable parameters: 1437705 +2025-06-26 17:08:52,228 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-26 17:08:52,230 - INFO - Type of train_dataloader: +2025-06-26 17:08:52,230 - INFO - Number of train_dataloader: 1 +2025-06-26 17:08:52,230 - INFO - List all methods and attributs of Dataloader: ['_DataLoader__initialized', '_DataLoader__multiprocessing_context', '_IterableDataset_len_called', '__annotations__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_auto_collation', '_dataset_kind', '_get_iterator', '_index_sampler', '_is_protocol', '_iterator', 'batch_sampler', 'batch_size', 'check_worker_number_rationality', 'collate_fn', 'dataset', 'drop_last', 'generator', 'multiprocessing_context', 'num_workers', 'persistent_workers', 'pin_memory', 'pin_memory_device', 'prefetch_factor', 'sampler', 'timeout', 'worker_init_fn'] 
+2025-06-26 17:08:56,813 - INFO - Batch: 0 +2025-06-26 17:08:56,818 - INFO - Batch.points: tensor([[[[-3.1358e-03, 3.7210e+00, 3.7106e+00, ..., -5.3803e-01, + 1.7617e+00, 1.4177e+00], + [ 5.4775e-01, -2.0625e-01, -6.1869e-01, ..., 3.5577e-01, + -7.1609e-01, -8.8304e-01], + [ 6.8388e-01, 3.4406e-01, 8.5621e-01, ..., 7.4636e-01, + 1.0855e+00, 4.9376e-01]]], + + + [[[ 2.0024e+00, 2.6824e+00, -5.8603e-01, ..., 5.5839e-01, + 2.7609e+00, 3.2322e+00], + [-6.1381e-01, 6.7716e-01, 8.0544e-01, ..., -7.6071e-01, + 8.1247e-01, 8.5958e-01], + [ 1.2874e+00, 3.8066e-01, 4.5000e-01, ..., 8.8547e-01, + 6.8198e-01, 5.7108e-01]]]]) +2025-06-26 17:08:56,819 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-26 17:08:56,819 - INFO - Batch.Pressure: tensor([[[-149.8820, -47.8371, -30.8754, ..., -213.2100, -296.0330, + -58.6534]], + + [[ -88.2562, -86.5040, -322.1380, ..., -8.5429, -83.1721, + -153.7220]]]) +2025-06-26 17:08:56,819 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-26 17:08:58,092 - INFO - Type of dataset: +2025-06-26 17:08:58,095 - INFO - Number of samples of subset : 3 +2025-06-26 17:08:58,095 - INFO - Subset indices: [1, 2, 4] +2025-06-26 17:08:58,095 - INFO - List the train_dataset vtk files: +2025-06-26 17:08:58,095 - INFO - 0: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk +2025-06-26 17:08:58,096 - INFO - 1: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk +2025-06-26 17:08:58,096 - INFO - 2: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk +2025-06-26 17:08:58,096 - INFO - List all methods and attributs of subset: ['__add__', '__annotations__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__getitems__', '__gt__', '__hash__', '__init__', 
'__init_subclass__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slotnames__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_is_protocol', 'dataset', 'indices']) +2025-06-26 17:08:58,096 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-27 09:58:41,782 - INFO - args.exp_name : Train_Test +2025-06-27 09:58:41,783 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 09:58:41,783 - INFO - Starting training with 1 GPUs +2025-06-27 09:58:50,184 - INFO - Total trainable parameters: 1437705 +2025-06-27 09:58:50,191 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. 
+2025-06-27 09:58:50,193 - INFO - Type of train_dataloader: +2025-06-27 09:58:50,193 - INFO - Number of train_dataloader: 1 +2025-06-27 10:00:22,563 - INFO - args.exp_name : Train_Test +2025-06-27 10:00:22,564 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 10:00:22,564 - INFO - Starting training with 1 GPUs +2025-06-27 10:00:25,666 - INFO - Total trainable parameters: 1437705 +2025-06-27 10:00:25,674 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-27 10:00:25,676 - INFO - Type of train_dataloader: +2025-06-27 10:00:25,677 - INFO - Number of train_dataloader: 1 +2025-06-27 10:00:25,677 - INFO - List all methods and attributs of Dataloader: ['_DataLoader__initialized', '_DataLoader__multiprocessing_context', '_IterableDataset_len_called', '__annotations__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_auto_collation', '_dataset_kind', '_get_iterator', '_index_sampler', '_is_protocol', '_iterator', 'batch_sampler', 'batch_size', 'check_worker_number_rationality', 'collate_fn', 'dataset', 'drop_last', 'generator', 
'multiprocessing_context', 'num_workers', 'persistent_workers', 'pin_memory', 'pin_memory_device', 'prefetch_factor', 'sampler', 'timeout', 'worker_init_fn'] +2025-06-27 10:00:25,677 - INFO - We can access the internal conetnt by dataloader: +2025-06-27 10:00:29,671 - INFO - Batch: 0 +2025-06-27 10:00:29,675 - INFO - Batch.points: tensor([[[[-3.1358e-03, 3.7210e+00, 3.7106e+00, ..., -5.3803e-01, + 1.7617e+00, 1.4177e+00], + [ 5.4775e-01, -2.0625e-01, -6.1869e-01, ..., 3.5577e-01, + -7.1609e-01, -8.8304e-01], + [ 6.8388e-01, 3.4406e-01, 8.5621e-01, ..., 7.4636e-01, + 1.0855e+00, 4.9376e-01]]], + + + [[[ 2.0024e+00, 2.6824e+00, -5.8603e-01, ..., 5.5839e-01, + 2.7609e+00, 3.2322e+00], + [-6.1381e-01, 6.7716e-01, 8.0544e-01, ..., -7.6071e-01, + 8.1247e-01, 8.5958e-01], + [ 1.2874e+00, 3.8066e-01, 4.5000e-01, ..., 8.8547e-01, + 6.8198e-01, 5.7108e-01]]]]) +2025-06-27 10:00:29,675 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 10:00:29,675 - INFO - Batch.Pressure: tensor([[[-149.8820, -47.8371, -30.8754, ..., -213.2100, -296.0330, + -58.6534]], + + [[ -88.2562, -86.5040, -322.1380, ..., -8.5429, -83.1721, + -153.7220]]]) +2025-06-27 10:00:29,676 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 10:00:30,174 - INFO - Type of dataset: +2025-06-27 10:00:30,174 - INFO - Number of samples of subset : 3 +2025-06-27 10:00:30,175 - INFO - Subset indices: [1, 2, 4] +2025-06-27 10:00:30,175 - INFO - List the train_dataset vtk files: +2025-06-27 10:00:30,175 - INFO - 0: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk +2025-06-27 10:00:30,175 - INFO - 1: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk +2025-06-27 10:00:30,175 - INFO - 2: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk +2025-06-27 10:00:30,175 - INFO - List all methods and attributs of 
subset: ['__add__', '__annotations__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__getitems__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slotnames__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_is_protocol', 'dataset', 'indices']) +2025-06-27 10:00:30,175 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-27 10:41:56,752 - INFO - args.exp_name : Train_Test +2025-06-27 10:41:56,753 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 10:41:56,754 - INFO - Starting training with 1 GPUs +2025-06-27 10:42:00,358 - INFO - Total trainable parameters: 1437705 +2025-06-27 10:42:00,364 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. 
+2025-06-27 10:42:00,365 - INFO - Type of train_dataloader: +2025-06-27 10:42:00,365 - INFO - Number of train_dataloader: 1 +2025-06-27 10:42:00,365 - INFO - List all methods and attributs of Dataloader: ['_DataLoader__initialized', '_DataLoader__multiprocessing_context', '_IterableDataset_len_called', '__annotations__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_auto_collation', '_dataset_kind', '_get_iterator', '_index_sampler', '_is_protocol', '_iterator', 'batch_sampler', 'batch_size', 'check_worker_number_rationality', 'collate_fn', 'dataset', 'drop_last', 'generator', 'multiprocessing_context', 'num_workers', 'persistent_workers', 'pin_memory', 'pin_memory_device', 'prefetch_factor', 'sampler', 'timeout', 'worker_init_fn'] +2025-06-27 10:42:00,365 - INFO - We can access the internal conetnt by dataloader: +2025-06-27 10:42:04,914 - INFO - Batch: 0 +2025-06-27 10:42:04,914 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 10:42:04,914 - INFO - sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 10:42:04,915 - INFO - sample_0.shape: torch.Size([3, 10000]) +2025-06-27 10:42:04,919 - INFO - The first 10 points in x_coor: tensor([-3.1358e-03, 3.7210e+00, 3.7106e+00, 1.5323e+00, -1.1745e-01, + 1.4169e+00, 2.9957e+00, -2.5156e-02, -2.6220e-02, 1.2272e-01]) +2025-06-27 10:42:04,919 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 10:42:05,347 - INFO - Type of dataset: +2025-06-27 10:42:05,347 - INFO - Number of samples of subset : 3 +2025-06-27 10:42:05,347 - INFO - Subset indices: [1, 2, 4] +2025-06-27 10:42:05,347 - INFO - List the 
train_dataset vtk files: +2025-06-27 10:42:05,347 - INFO - 0: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk +2025-06-27 10:42:05,347 - INFO - 1: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk +2025-06-27 10:42:05,349 - INFO - 2: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk +2025-06-27 10:42:05,349 - INFO - List all methods and attributs of subset: ['__add__', '__annotations__', '__class__', '__class_getitem__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__getitems__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__orig_bases__', '__parameters__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slotnames__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_is_protocol', 'dataset', 'indices']) +2025-06-27 10:42:05,349 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-27 10:46:20,014 - INFO - args.exp_name : Train_Test +2025-06-27 10:46:20,015 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 10:46:20,015 - INFO - Starting training with 1 GPUs +2025-06-27 10:46:22,932 - INFO - Total trainable parameters: 1437705 +2025-06-27 10:46:22,938 - ERROR - No matching VTK files found for IDs in 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-27 10:46:22,939 - INFO - Type of train_dataloader: +2025-06-27 10:46:22,939 - INFO - Number of train_dataloader: 1 +2025-06-27 10:46:22,939 - INFO - We can access the internal conetnt by dataloader: +2025-06-27 10:46:27,409 - INFO - Batch: 0 +2025-06-27 10:46:27,409 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 10:46:27,409 - INFO - sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 10:46:27,410 - INFO - sample_0.shape: torch.Size([3, 10000]) +2025-06-27 10:46:27,460 - INFO - The first 10 points in x_coor for the sample_0: tensor([-3.1358e-03, 3.7210e+00, 3.7106e+00, 1.5323e+00, -1.1745e-01, + 1.4169e+00, 2.9957e+00, -2.5156e-02, -2.6220e-02, 1.2272e-01]) +2025-06-27 10:46:27,460 - INFO - The first 10 points in x_coor for the sample_1: tensor([-3.1358e-03, 3.7210e+00, 3.7106e+00, 1.5323e+00, -1.1745e-01, + 1.4169e+00, 2.9957e+00, -2.5156e-02, -2.6220e-02, 1.2272e-01]) +2025-06-27 10:46:27,460 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 10:46:27,930 - INFO - Type of dataset: +2025-06-27 10:46:27,930 - INFO - Number of samples of subset : 3 +2025-06-27 10:46:27,930 - INFO - Subset indices: [1, 2, 4] +2025-06-27 10:46:27,930 - INFO - List the train_dataset vtk files: +2025-06-27 10:46:27,930 - INFO - 0: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk +2025-06-27 10:46:27,930 - INFO - 1: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk +2025-06-27 10:46:27,930 - INFO - 2: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk +2025-06-27 10:46:27,930 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-27 10:49:46,093 - INFO - args.exp_name : Train_Test +2025-06-27 10:49:46,095 - INFO - Arguments: 
Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 10:49:46,095 - INFO - Starting training with 1 GPUs +2025-06-27 10:49:48,563 - INFO - Total trainable parameters: 1437705 +2025-06-27 10:49:48,569 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-27 10:49:48,570 - INFO - Type of train_dataloader: +2025-06-27 10:49:48,570 - INFO - Number of train_dataloader: 1 +2025-06-27 10:49:48,570 - INFO - We can access the internal conetnt by dataloader: +2025-06-27 10:49:53,438 - INFO - Batch: 0 +2025-06-27 10:49:53,439 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 10:49:53,439 - INFO - sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 10:49:53,439 - INFO - sample_0.shape: torch.Size([3, 10000]) +2025-06-27 10:49:53,443 - INFO - The first 10 points in x_coor for the sample_0: tensor([-3.1358e-03, 3.7210e+00, 3.7106e+00, 1.5323e+00, -1.1745e-01, + 1.4169e+00, 2.9957e+00, -2.5156e-02, -2.6220e-02, 1.2272e-01]) +2025-06-27 10:49:53,444 - INFO - The first 10 points in x_coor for the sample_1: tensor([[ 2.0024, 2.6824, -0.5860, ..., 0.5584, 2.7609, 3.2322], + [-0.6138, 0.6772, 0.8054, ..., -0.7607, 0.8125, 0.8596], + [ 1.2874, 0.3807, 0.4500, ..., 0.8855, 0.6820, 0.5711]]) +2025-06-27 10:49:53,444 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 10:49:53,855 - INFO - Type of dataset: +2025-06-27 10:49:53,856 - INFO - Number of samples of subset : 3 +2025-06-27 10:49:53,856 - INFO - Subset indices: [1, 2, 4] 
+2025-06-27 10:49:53,856 - INFO - List the train_dataset vtk files: +2025-06-27 10:49:53,856 - INFO - 0: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk +2025-06-27 10:49:53,856 - INFO - 1: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk +2025-06-27 10:49:53,856 - INFO - 2: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk +2025-06-27 10:49:53,856 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-27 10:56:27,773 - INFO - args.exp_name : Train_Test +2025-06-27 10:56:27,773 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 10:56:27,773 - INFO - Starting training with 1 GPUs +2025-06-27 10:56:30,420 - INFO - Total trainable parameters: 1437705 +2025-06-27 10:56:30,426 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. 
+2025-06-27 10:56:30,428 - INFO - Type of train_dataloader: +2025-06-27 10:56:30,428 - INFO - Number of train_dataloader: 1 +2025-06-27 10:56:30,428 - INFO - We can access the internal conetnt by dataloader: +2025-06-27 10:56:34,780 - INFO - Batch: 0 +2025-06-27 10:56:34,780 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 10:56:34,780 - INFO - sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 10:56:34,780 - INFO - sample_0.shape: torch.Size([3, 10000]) +2025-06-27 10:56:34,785 - INFO - The first 10 points in x_coor for the sample_0: tensor([-3.1358e-03, 3.7210e+00, 3.7106e+00, 1.5323e+00, -1.1745e-01, + 1.4169e+00, 2.9957e+00, -2.5156e-02, -2.6220e-02, 1.2272e-01]) +2025-06-27 10:56:34,785 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.0024, 2.6824, -0.5860, 3.1021, 0.9962, -0.3762, 0.0654, -0.5734, + 0.3521, -0.9314]) +2025-06-27 10:56:34,785 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 10:56:35,218 - INFO - Type of dataset: +2025-06-27 10:56:35,218 - INFO - Number of samples of subset : 3 +2025-06-27 10:56:35,218 - INFO - Subset indices: [1, 2, 4] +2025-06-27 10:56:35,218 - INFO - List the train_dataset vtk files: +2025-06-27 10:56:35,218 - INFO - 0: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk +2025-06-27 10:56:35,218 - INFO - 1: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk +2025-06-27 10:56:35,218 - INFO - 2: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk +2025-06-27 10:56:35,218 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-27 11:02:55,623 - INFO - args.exp_name : Train_Test +2025-06-27 11:02:55,624 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, 
dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 11:02:55,624 - INFO - Starting training with 1 GPUs +2025-06-27 11:02:58,254 - INFO - Total trainable parameters: 1437705 +2025-06-27 11:02:58,263 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-27 11:02:58,266 - INFO - Type of train_dataloader: +2025-06-27 11:02:58,266 - INFO - Number of train_dataloader: 1 +2025-06-27 11:02:58,266 - INFO - We can access the internal conetnt by dataloader: +2025-06-27 11:03:02,882 - INFO - Batch: 0 +2025-06-27 11:03:02,882 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 11:03:02,883 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 11:03:02,883 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 11:03:02,886 - INFO - The first 10 points in x_coor for the sample_0: tensor([-3.1358e-03, 3.7210e+00, 3.7106e+00, 1.5323e+00, -1.1745e-01, + 1.4169e+00, 2.9957e+00, -2.5156e-02, -2.6220e-02, 1.2272e-01]) +2025-06-27 11:03:02,886 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.0024, 2.6824, -0.5860, 3.1021, 0.9962, -0.3762, 0.0654, -0.5734, + 0.3521, -0.9314]) +2025-06-27 11:03:02,886 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 11:03:02,886 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 11:03:02,886 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 11:06:25,820 - INFO - args.exp_name : Train_Test +2025-06-27 11:06:25,821 - INFO - Arguments: 
Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 11:06:25,821 - INFO - Starting training with 1 GPUs +2025-06-27 11:06:28,334 - INFO - Total trainable parameters: 1437705 +2025-06-27 11:06:28,343 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-27 11:06:28,346 - INFO - Type of train_dataloader: +2025-06-27 11:06:28,346 - INFO - Number of train_dataloader: 1 +2025-06-27 11:06:28,346 - INFO - We can access the internal conetnt by dataloader: +2025-06-27 11:06:32,805 - INFO - Batch: 0 +2025-06-27 11:06:32,805 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 11:06:32,805 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 11:06:32,805 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 11:06:32,824 - INFO - The first 10 points in x_coor for the sample_0: tensor([-3.1358e-03, 3.7210e+00, 3.7106e+00, 1.5323e+00, -1.1745e-01, + 1.4169e+00, 2.9957e+00, -2.5156e-02, -2.6220e-02, 1.2272e-01]) +2025-06-27 11:06:32,825 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.0024, 2.6824, -0.5860, 3.1021, 0.9962, -0.3762, 0.0654, -0.5734, + 0.3521, -0.9314]) +2025-06-27 11:06:32,825 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 11:06:32,825 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 11:06:32,825 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 11:06:32,825 - INFO - The first 10 points pressure for the sample_0: 
tensor([-149.8820, -47.8371, -30.8754, -61.2213, -179.6550, -269.8940, + -65.7033, -205.9630, -144.7190, -148.6480]) +2025-06-27 11:06:32,826 - INFO - The first 10 points pressure for the sample_1: tensor([-8.8256e+01, -8.6504e+01, -3.2214e+02, -3.3256e+02, -5.7983e+02, + -8.2140e+01, -1.0871e+02, 4.1819e-02, -1.4809e+02, 2.5899e+02]) +2025-06-27 11:06:33,312 - INFO - Type of dataset: +2025-06-27 11:06:33,313 - INFO - Number of samples of subset : 3 +2025-06-27 11:06:33,313 - INFO - Subset indices: [1, 2, 4] +2025-06-27 11:06:33,313 - INFO - List the train_dataset vtk files: +2025-06-27 11:06:33,313 - INFO - 0: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk +2025-06-27 11:06:33,313 - INFO - 1: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk +2025-06-27 11:06:33,313 - INFO - 2: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk +2025-06-27 11:06:33,315 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-27 11:20:43,167 - INFO - args.exp_name : Train_Test +2025-06-27 11:20:43,168 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 11:20:43,168 - INFO - Starting training with 1 GPUs +2025-06-27 11:20:45,727 - INFO - Total trainable parameters: 1437705 +2025-06-27 11:20:45,733 - ERROR - No matching VTK files found for IDs in 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-27 11:20:45,734 - INFO - Type of train_dataloader: +2025-06-27 11:20:45,734 - INFO - Number of train_dataloader: 1 +2025-06-27 11:20:45,734 - INFO - We can access the internal conetnt by dataloader: +2025-06-27 11:20:50,283 - INFO - Batch: 0 +2025-06-27 11:20:50,283 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 11:20:50,283 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 11:20:50,283 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 11:20:50,290 - INFO - The first 10 points in x_coor for the sample_0: tensor([-3.1358e-03, 3.7210e+00, 3.7106e+00, 1.5323e+00, -1.1745e-01, + 1.4169e+00, 2.9957e+00, -2.5156e-02, -2.6220e-02, 1.2272e-01]) +2025-06-27 11:20:50,290 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.0024, 2.6824, -0.5860, 3.1021, 0.9962, -0.3762, 0.0654, -0.5734, + 0.3521, -0.9314]) +2025-06-27 11:20:50,290 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 11:20:50,290 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 11:20:50,290 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 11:20:50,291 - INFO - The first 10 points pressure for the sample_0: tensor([-149.8820, -47.8371, -30.8754, -61.2213, -179.6550, -269.8940, + -65.7033, -205.9630, -144.7190, -148.6480]) +2025-06-27 11:20:50,291 - INFO - The first 10 points pressure for the sample_1: tensor([-8.8256e+01, -8.6504e+01, -3.2214e+02, -3.3256e+02, -5.7983e+02, + -8.2140e+01, -1.0871e+02, 4.1819e-02, -1.4809e+02, 2.5899e+02]) +2025-06-27 11:20:50,719 - INFO - Type of train_subset: +2025-06-27 11:20:50,719 - INFO - Number of samples of train_subset : 3 +2025-06-27 11:20:50,720 - INFO - Subset indices: [1, 2, 4] +2025-06-27 11:20:50,720 - INFO - List the train_subset vtk files: +2025-06-27 11:20:50,720 - INFO - 0: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk +2025-06-27 11:20:50,720 - INFO - 1: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk +2025-06-27 11:20:50,720 - INFO - 2: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk +2025-06-27 11:20:50,722 - INFO - Type of full_dataset: +2025-06-27 11:20:50,722 - INFO - Number of samples of full_dataset: 5 +2025-06-27 12:34:58,883 - INFO - args.exp_name : Train_Test +2025-06-27 12:34:58,884 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 12:34:58,884 - INFO - Starting training with 1 GPUs +2025-06-27 12:35:01,570 - INFO - Total trainable parameters: 1437705 +2025-06-27 12:35:01,576 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. 
+2025-06-27 12:35:01,577 - INFO - Type of train_dataloader: +2025-06-27 12:35:01,577 - INFO - Number of train_dataloader: 1 +2025-06-27 12:35:01,578 - INFO - We can access the internal conetnt by dataloader: +2025-06-27 12:35:05,924 - INFO - Batch: 0 +2025-06-27 12:35:05,924 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 12:35:05,924 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 12:35:05,924 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 12:35:05,929 - INFO - The first 10 points in x_coor for the sample_0: tensor([-3.1358e-03, 3.7210e+00, 3.7106e+00, 1.5323e+00, -1.1745e-01, + 1.4169e+00, 2.9957e+00, -2.5156e-02, -2.6220e-02, 1.2272e-01]) +2025-06-27 12:35:05,929 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.0024, 2.6824, -0.5860, 3.1021, 0.9962, -0.3762, 0.0654, -0.5734, + 0.3521, -0.9314]) +2025-06-27 12:35:05,929 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 12:35:05,929 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 12:35:05,929 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 12:35:05,930 - INFO - The first 10 points pressure for the sample_0: tensor([-149.8820, -47.8371, -30.8754, -61.2213, -179.6550, -269.8940, + -65.7033, -205.9630, -144.7190, -148.6480]) +2025-06-27 12:35:05,930 - INFO - The first 10 points pressure for the sample_1: tensor([-8.8256e+01, -8.6504e+01, -3.2214e+02, -3.3256e+02, -5.7983e+02, + -8.2140e+01, -1.0871e+02, 4.1819e-02, -1.4809e+02, 2.5899e+02]) +2025-06-27 12:35:06,408 - INFO - Type of train_subset: +2025-06-27 12:35:06,408 - INFO - Number of samples of train_subset : 3 +2025-06-27 12:35:06,408 - INFO - Subset indices: [1, 2, 4] +2025-06-27 12:35:06,408 - INFO - List the train_subset vtk files: +2025-06-27 12:35:06,408 - INFO - 0: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk +2025-06-27 12:35:06,408 - INFO - 1: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk +2025-06-27 12:35:06,408 - INFO - 2: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk +2025-06-27 12:35:06,414 - INFO - Type of full_dataset: +2025-06-27 12:35:06,414 - INFO - Number of samples of full_dataset: 5 +2025-06-27 12:35:06,414 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_009.vtk: 0 +2025-06-27 12:35:06,414 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk: 1 +2025-06-27 12:35:06,417 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk: 2 +2025-06-27 12:35:06,418 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_002.vtk: 3 +2025-06-27 12:35:06,418 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk: 4 +2025-06-27 12:35:06,418 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-27 14:22:56,383 - INFO - args.exp_name : Train_Test +2025-06-27 14:22:56,384 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 14:22:56,384 - INFO - Starting training with 1 GPUs +2025-06-27 14:22:59,237 - INFO - Total trainable parameters: 1437705 +2025-06-27 14:22:59,243 - ERROR - No matching VTK 
files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-27 14:22:59,244 - INFO - Type of train_dataloader: +2025-06-27 14:22:59,244 - INFO - Number of train_dataloader: 1 +2025-06-27 14:22:59,244 - INFO - We can access the internal conetnt by dataloader: +2025-06-27 14:23:03,473 - INFO - Batch: 0 +2025-06-27 14:23:03,473 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:23:03,473 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:23:03,473 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:23:03,476 - INFO - The first 10 points in x_coor for the sample_0: tensor([-3.1358e-03, 3.7210e+00, 3.7106e+00, 1.5323e+00, -1.1745e-01, + 1.4169e+00, 2.9957e+00, -2.5156e-02, -2.6220e-02, 1.2272e-01]) +2025-06-27 14:23:03,477 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.0024, 2.6824, -0.5860, 3.1021, 0.9962, -0.3762, 0.0654, -0.5734, + 0.3521, -0.9314]) +2025-06-27 14:23:03,477 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:23:03,477 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:23:03,477 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:23:03,477 - INFO - The first 10 points pressure for the sample_0: tensor([-149.8820, -47.8371, -30.8754, -61.2213, -179.6550, -269.8940, + -65.7033, -205.9630, -144.7190, -148.6480]) +2025-06-27 14:23:03,478 - INFO - The first 10 points pressure for the sample_1: tensor([-8.8256e+01, -8.6504e+01, -3.2214e+02, -3.3256e+02, -5.7983e+02, + -8.2140e+01, -1.0871e+02, 4.1819e-02, -1.4809e+02, 2.5899e+02]) +2025-06-27 14:23:03,936 - INFO - Type of train_subset: +2025-06-27 14:23:03,936 - INFO - Number of samples of train_subset : 3 +2025-06-27 14:23:03,936 - INFO - Subset indices: [1, 2, 4] +2025-06-27 14:23:03,936 - INFO - List the train_subset vtk files: +2025-06-27 14:23:03,936 - INFO - 0: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk +2025-06-27 14:23:03,936 - INFO - 1: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk +2025-06-27 14:23:03,936 - INFO - 2: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk +2025-06-27 14:23:03,938 - INFO - Type of full_dataset: +2025-06-27 14:23:03,938 - INFO - Number of samples of full_dataset: 5 +2025-06-27 14:23:03,938 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_009.vtk: 0 +2025-06-27 14:23:03,938 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk: 1 +2025-06-27 14:23:03,939 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk: 2 +2025-06-27 14:23:03,939 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_002.vtk: 3 +2025-06-27 14:23:03,939 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk: 4 +2025-06-27 14:23:03,939 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-27 14:32:10,788 - INFO - args.exp_name : Train_Test +2025-06-27 14:32:10,788 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 14:32:10,788 - INFO - 
Starting training with 1 GPUs +2025-06-27 14:32:13,702 - INFO - Total trainable parameters: 1437705 +2025-06-27 14:32:13,707 - ERROR - No matching VTK files found for IDs in /work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits/val_design_ids.txt. +2025-06-27 14:32:13,709 - INFO - Type of train_dataloader: +2025-06-27 14:32:13,709 - INFO - Number of train_dataloader: 1 +2025-06-27 14:32:13,709 - INFO - We can access the internal conetnt by dataloader: +2025-06-27 14:32:17,927 - INFO - Batch: 0 +2025-06-27 14:32:17,928 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:32:17,928 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:32:17,928 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:32:17,932 - INFO - The first 10 points in x_coor for the sample_0: tensor([-3.1358e-03, 3.7210e+00, 3.7106e+00, 1.5323e+00, -1.1745e-01, + 1.4169e+00, 2.9957e+00, -2.5156e-02, -2.6220e-02, 1.2272e-01]) +2025-06-27 14:32:17,932 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.0024, 2.6824, -0.5860, 3.1021, 0.9962, -0.3762, 0.0654, -0.5734, + 0.3521, -0.9314]) +2025-06-27 14:32:17,932 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:32:17,932 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:32:17,932 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:32:17,933 - INFO - The first 10 points pressure for the sample_0: tensor([-149.8820, -47.8371, -30.8754, -61.2213, -179.6550, -269.8940, + -65.7033, -205.9630, -144.7190, -148.6480]) +2025-06-27 14:32:17,933 - INFO - The first 10 points pressure for the sample_1: tensor([-8.8256e+01, -8.6504e+01, -3.2214e+02, -3.3256e+02, -5.7983e+02, + -8.2140e+01, -1.0871e+02, 4.1819e-02, -1.4809e+02, 2.5899e+02]) +2025-06-27 14:32:18,694 - INFO - Type of train_subset: +2025-06-27 14:32:18,694 - INFO - Number of samples of train_subset : 3 +2025-06-27 14:32:18,694 - INFO - Subset indices: [1, 2, 4] 
+2025-06-27 14:32:18,694 - INFO - List the train_subset vtk files: +2025-06-27 14:32:18,695 - INFO - 0: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk +2025-06-27 14:32:18,695 - INFO - 1: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk +2025-06-27 14:32:18,695 - INFO - 2: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk +2025-06-27 14:32:18,700 - INFO - Type of full_dataset: +2025-06-27 14:32:18,701 - INFO - Number of samples of full_dataset: 5 +2025-06-27 14:32:18,701 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_009.vtk: 0 +2025-06-27 14:32:18,701 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk: 1 +2025-06-27 14:32:18,701 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk: 2 +2025-06-27 14:32:18,701 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_002.vtk: 3 +2025-06-27 14:32:18,701 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk: 4 +2025-06-27 14:32:18,701 - INFO - Data loaded: 1 training batches, 0 validation batches, 1 test batches +2025-06-27 14:32:18,703 - INFO - Demonstrate the parameters: +2025-06-27 14:58:55,273 - INFO - args.exp_name : Train_Test +2025-06-27 14:58:55,274 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, 
batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 14:58:55,274 - INFO - Starting training with 1 GPUs +2025-06-27 14:58:58,073 - INFO - Total trainable parameters: 1437705 +2025-06-27 14:58:58,128 - INFO - Type of train_dataloader: +2025-06-27 14:58:58,128 - INFO - Number of train_dataloader: 42 +2025-06-27 14:58:58,128 - INFO - We can access the internal conetnt by dataloader: +2025-06-27 14:59:03,483 - INFO - Batch: 0 +2025-06-27 14:59:03,483 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,483 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,483 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,498 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.0485, -0.5382, 0.8928, -0.5284, 0.1444, 2.6208, 2.9762, 3.6200, + -0.3787, 1.3031]) +2025-06-27 14:59:03,498 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.2833, 2.6895, 2.9486, 1.6583, -0.7939, 2.0023, 3.1221, -0.4969, + 1.6010, 3.6084]) +2025-06-27 14:59:03,498 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,499 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,499 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,499 - INFO - The first 10 points pressure for the sample_0: tensor([-338.8030, -198.2250, -328.4480, -501.5070, -109.0920, -77.5971, + -91.8080, -29.4908, -284.0090, -91.4154]) +2025-06-27 14:59:03,499 - INFO - The first 10 points pressure for the sample_1: tensor([-184.4790, -81.4952, -111.3050, -65.2745, 262.2230, -241.2780, + -14.4769, -133.4740, -90.2832, -18.6696]) +2025-06-27 14:59:03,501 - INFO - Batch: 1 +2025-06-27 14:59:03,501 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,501 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,501 - INFO - 
points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,501 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.3513, 1.2264, 2.3647, 0.1573, -0.5166, 0.6042, 2.8162, 3.3206, + 2.9534, 3.4002]) +2025-06-27 14:59:03,502 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.3273, 2.5158, 1.0972, 3.1233, 2.9517, 0.5927, 1.0853, 2.0020, + 1.7042, 1.0168]) +2025-06-27 14:59:03,502 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,502 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,502 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,503 - INFO - The first 10 points pressure for the sample_0: tensor([-148.8720, -323.1080, -107.6920, -145.9240, -122.9890, -107.0480, + -73.7163, -52.5742, -81.2839, -54.2815]) +2025-06-27 14:59:03,506 - INFO - The first 10 points pressure for the sample_1: tensor([-162.1830, -182.7360, -78.5279, -28.9796, -74.6324, -160.5080, + -74.9221, -181.3020, -57.2525, -253.2530]) +2025-06-27 14:59:03,507 - INFO - Batch: 2 +2025-06-27 14:59:03,507 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,507 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,507 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,507 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.6993, 2.4030, 0.8899, 2.0135, 3.2732, 2.8507, -0.0716, 1.6125, + 2.9042, 0.3750]) +2025-06-27 14:59:03,508 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.1687, 0.1376, 1.5781, 2.6907, -0.8913, 3.2266, -0.2999, 1.4406, + 3.1044, 0.3440]) +2025-06-27 14:59:03,508 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,508 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,508 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,508 - INFO - The first 10 points pressure for the sample_0: 
tensor([-208.3020, -73.5788, -553.2300, -180.9300, -24.8334, -148.6590, + -183.7780, -86.4613, -121.7070, -128.1350]) +2025-06-27 14:59:03,509 - INFO - The first 10 points pressure for the sample_1: tensor([-165.9060, -261.3880, -70.9110, -124.4570, 266.7510, -80.9190, + -47.6652, -66.3573, -81.4829, -123.6120]) +2025-06-27 14:59:03,530 - INFO - Batch: 3 +2025-06-27 14:59:03,530 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,530 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,530 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,531 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.6583, 2.0797, -0.6899, 2.9531, -0.5864, 2.2427, 1.3490, -0.5493, + -0.0602, -0.4740]) +2025-06-27 14:59:03,531 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.3746, 1.6812, 2.2774, 1.6584, 1.9345, 2.5699, -0.2872, 1.8989, + 0.3043, -0.3038]) +2025-06-27 14:59:03,531 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,531 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,531 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,532 - INFO - The first 10 points pressure for the sample_0: tensor([ -57.7625, -188.2780, -41.3928, -56.4165, -216.8250, -48.8867, + -71.5313, -197.1700, -160.1030, -137.5480]) +2025-06-27 14:59:03,532 - INFO - The first 10 points pressure for the sample_1: tensor([ -94.1245, -81.8525, -96.1851, -76.5446, -79.7430, -61.4083, + -213.5430, -109.7750, -182.5630, 287.5700]) +2025-06-27 14:59:03,541 - INFO - Batch: 4 +2025-06-27 14:59:03,541 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,542 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,542 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,542 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.2684, 0.2005, 2.4938, 
-0.8517, -0.1792, 1.2573, 3.1068, 3.0116, + 0.9479, 1.2227]) +2025-06-27 14:59:03,543 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.2343, 1.0350, -0.3204, 0.1797, 1.8875, 0.1917, -0.0378, 2.6067, + 2.1969, 1.3375]) +2025-06-27 14:59:03,543 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,543 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,543 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,544 - INFO - The first 10 points pressure for the sample_0: tensor([-173.1780, -202.8530, -80.9509, 327.1650, -128.7190, -326.8660, + -40.6500, -19.0033, -106.4500, -370.2060]) +2025-06-27 14:59:03,544 - INFO - The first 10 points pressure for the sample_1: tensor([-131.4480, -711.7170, -99.5009, 28.7482, -64.0691, -150.5970, + -155.0720, -118.3460, -41.2665, -54.1201]) +2025-06-27 14:59:03,551 - INFO - Batch: 5 +2025-06-27 14:59:03,551 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,551 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,551 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,551 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.5521, 2.6666, 0.5497, 0.1573, 2.8155, 2.9984, -0.1173, 2.5646, + -0.9318, 0.5025]) +2025-06-27 14:59:03,552 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.3631, 3.3885, 0.4810, -0.2440, 1.7385, -0.4245, -0.0946, -0.6986, + 2.2198, 2.6981]) +2025-06-27 14:59:03,552 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,552 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,552 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,552 - INFO - The first 10 points pressure for the sample_0: tensor([ -84.8091, -77.7356, 97.0457, -153.8820, -132.9120, -61.9000, + -132.3560, -76.8720, 436.0530, 81.3881]) +2025-06-27 14:59:03,552 - INFO - The first 
10 points pressure for the sample_1: tensor([-102.3700, -15.2985, 80.3081, -129.9610, -95.3753, -90.2176, + -133.5760, 22.5415, -142.7650, -88.9344]) +2025-06-27 14:59:03,561 - INFO - Batch: 6 +2025-06-27 14:59:03,561 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,561 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,561 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,561 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.6010, 1.1225, 1.3490, 2.9752, 0.3980, 1.9333, -0.0454, 3.7736, + 0.4439, 2.0478]) +2025-06-27 14:59:03,562 - INFO - The first 10 points in x_coor for the sample_1: tensor([0.2603, 2.6326, 0.7668, 0.3881, 0.9500, 0.7333, 2.7462, 3.3095, 0.7302, + 1.3604]) +2025-06-27 14:59:03,562 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,562 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,562 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,562 - INFO - The first 10 points pressure for the sample_0: tensor([ -72.9790, -519.1740, -101.6530, -85.5379, 82.8267, -191.4840, + -87.2200, -25.2302, -84.1994, -76.7869]) +2025-06-27 14:59:03,563 - INFO - The first 10 points pressure for the sample_1: tensor([-356.8900, -64.7882, 63.8028, -26.0867, -324.4070, 48.0206, + -399.5770, -150.6780, -78.1401, -75.5552]) +2025-06-27 14:59:03,571 - INFO - Batch: 7 +2025-06-27 14:59:03,571 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,571 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,571 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,572 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.5431e+00, 3.2020e+00, 3.6874e+00, 1.5438e+00, 5.2393e-01, + 2.0708e+00, 3.3398e+00, 1.2687e+00, 3.7581e+00, -3.0862e-03]) +2025-06-27 14:59:03,572 - INFO - The first 10 points in x_coor for the 
sample_1: tensor([-0.3220, 0.0083, 3.5499, 2.8617, 1.1195, 3.0793, -0.1098, 2.8404, + 2.3572, 3.3744]) +2025-06-27 14:59:03,572 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,572 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,572 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,573 - INFO - The first 10 points pressure for the sample_0: tensor([ -41.2135, -13.3389, -55.2665, -199.9300, -107.1490, -49.8250, + -57.6707, -68.3031, -34.3428, -156.1310]) +2025-06-27 14:59:03,573 - INFO - The first 10 points pressure for the sample_1: tensor([-186.6740, -176.6460, 28.5065, -86.4264, -184.2440, -153.2230, + -161.5000, 55.2674, -68.8246, 50.5586]) +2025-06-27 14:59:03,581 - INFO - Batch: 8 +2025-06-27 14:59:03,581 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,581 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,581 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,582 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.2492, 0.2958, 0.0304, 0.3405, -0.1605, 2.1281, 0.0187, 1.7500, + 0.7300, 1.3520]) +2025-06-27 14:59:03,582 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.2083, 2.9885, 3.2353, 1.7729, 3.8352, 3.5629, 0.2343, -0.1897, + -0.1860, 0.8676]) +2025-06-27 14:59:03,582 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,582 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,582 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,583 - INFO - The first 10 points pressure for the sample_0: tensor([-399.0650, -154.6710, -195.3250, -35.4192, -752.6250, -166.1940, + -32.8515, -81.4531, 85.0086, -146.5700]) +2025-06-27 14:59:03,583 - INFO - The first 10 points pressure for the sample_1: tensor([ -85.3987, -75.0366, -75.1776, -76.2214, -25.3767, -48.0276, + -294.6700, -621.4550, -99.7808, 
-125.2340]) +2025-06-27 14:59:03,591 - INFO - Batch: 9 +2025-06-27 14:59:03,591 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,591 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,591 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,592 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.6813, 0.9128, 2.3372, 2.3220, 2.5514, 2.5759, 2.3228, 2.6669, + -0.1183, 0.7985]) +2025-06-27 14:59:03,592 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.8912, 0.3833, 2.2885, -0.3175, 3.1891, -0.0260, -0.5542, 1.2801, + 3.1246, 2.3803]) +2025-06-27 14:59:03,592 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,592 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,592 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,593 - INFO - The first 10 points pressure for the sample_0: tensor([ -74.5930, -468.5290, -17.8253, -111.4660, -279.8660, -89.8917, + -81.7025, -230.2680, -153.5960, 18.8863]) +2025-06-27 14:59:03,593 - INFO - The first 10 points pressure for the sample_1: tensor([ -62.1802, 95.3592, -73.3089, -172.8170, -41.2741, -161.2840, + -57.3265, -262.5780, -101.2380, -39.0603]) +2025-06-27 14:59:03,601 - INFO - Batch: 10 +2025-06-27 14:59:03,601 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,602 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,602 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,602 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.0061, 0.3184, 1.2802, 1.1238, 2.8385, -0.5982, 2.9736, 3.1936, + 2.8499, -0.7896]) +2025-06-27 14:59:03,603 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.7120, -0.3803, 2.9076, 2.5315, 1.9188, -0.3108, 3.8506, 0.5791, + 2.8501, 0.2604]) +2025-06-27 14:59:03,603 - INFO - Batch.Pressure.shape: 
torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,603 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,603 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,603 - INFO - The first 10 points pressure for the sample_0: tensor([-119.4090, -7.9263, -97.0791, -215.6410, -71.9722, -38.5220, + -76.4743, 1.0474, -67.6804, 296.0490]) +2025-06-27 14:59:03,604 - INFO - The first 10 points pressure for the sample_1: tensor([-112.9390, -178.6530, -134.2990, -31.5704, -71.9506, -192.3010, + -16.1056, 163.5570, -113.4070, -174.5600]) +2025-06-27 14:59:03,611 - INFO - Batch: 11 +2025-06-27 14:59:03,612 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,612 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,612 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,612 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.1075, 3.8310, 2.6669, 2.5005, 0.5240, -0.3776, 0.1010, -0.0613, + 0.8792, 2.9784]) +2025-06-27 14:59:03,613 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.8087, 0.8565, 2.6422, 2.0250, -0.7496, -0.1292, 0.1458, 0.0684, + 1.5663, -0.0601]) +2025-06-27 14:59:03,613 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,613 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,613 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,613 - INFO - The first 10 points pressure for the sample_0: tensor([-159.9450, -22.9807, -89.1701, -74.5420, -94.6650, -170.8510, + -145.5740, -175.5390, -95.2538, -97.0740]) +2025-06-27 14:59:03,614 - INFO - The first 10 points pressure for the sample_1: tensor([ -69.0255, -82.4216, -41.9460, -69.3559, 318.6280, -154.0390, + 65.4091, -241.1140, -95.6229, -201.5230]) +2025-06-27 14:59:03,622 - INFO - Batch: 12 +2025-06-27 14:59:03,622 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 
14:59:03,622 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,622 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,623 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.4883, 3.8665, 3.0949, 1.4288, 0.8677, 1.4062, -0.1070, -0.1792, + -0.7087, 1.6583]) +2025-06-27 14:59:03,623 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 1.4521, -0.3804, 2.6021, 2.8123, 0.8792, -0.3793, 0.6042, -0.6245, + 0.3771, 0.4348]) +2025-06-27 14:59:03,623 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,623 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,623 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,624 - INFO - The first 10 points pressure for the sample_0: tensor([ -86.7106, -19.0995, -112.9440, -238.0560, -190.9050, -137.6680, + -31.7392, -184.7600, -52.8803, -75.6506]) +2025-06-27 14:59:03,624 - INFO - The first 10 points pressure for the sample_1: tensor([ -74.8531, -72.0747, -79.8344, -177.5810, -144.1570, -77.6853, + -70.6408, -160.1670, -128.4620, 102.3960]) +2025-06-27 14:59:03,634 - INFO - Batch: 13 +2025-06-27 14:59:03,634 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,634 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,634 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,635 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.1904, 1.5466, 0.2231, -0.7438, -0.6833, 2.7010, 3.1388, -0.5140, + 0.0188, 0.3035]) +2025-06-27 14:59:03,635 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.9523, 3.2254, -0.8287, 2.6178, 0.3874, -0.4131, 1.7728, 2.4983, + -0.7710, 2.7925]) +2025-06-27 14:59:03,635 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,635 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,635 - INFO - 
pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,635 - INFO - The first 10 points pressure for the sample_0: tensor([ -11.0432, -181.9940, -209.9080, 334.9540, -21.7079, -117.5570, + -177.2140, -252.7250, -114.8210, -136.9090]) +2025-06-27 14:59:03,636 - INFO - The first 10 points pressure for the sample_1: tensor([ 376.2220, -66.3437, -79.2989, -83.8929, 84.3352, -319.6010, + -209.7620, -110.9740, -149.2000, -128.4940]) +2025-06-27 14:59:03,643 - INFO - Batch: 14 +2025-06-27 14:59:03,643 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,643 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,643 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,644 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.9188, 1.9219, -0.2439, -0.2003, 3.6165, 3.7250, -0.6370, -0.1329, + 1.4949, 1.0856]) +2025-06-27 14:59:03,644 - INFO - The first 10 points in x_coor for the sample_1: tensor([0.9479, 0.2294, 0.2146, 1.4291, 2.4146, 0.3630, 3.1962, 3.5604, 2.1624, + 2.9073]) +2025-06-27 14:59:03,644 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,644 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,644 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,644 - INFO - The first 10 points pressure for the sample_0: tensor([ -63.7926, -202.4660, -114.2120, -170.5030, -25.9001, -14.2671, + 202.1390, -156.5060, -74.7689, -302.7870]) +2025-06-27 14:59:03,645 - INFO - The first 10 points pressure for the sample_1: tensor([-101.0460, -168.4900, -162.7870, -163.8160, -32.0137, -122.1340, + -70.1305, 61.3782, -206.4840, -77.4960]) +2025-06-27 14:59:03,653 - INFO - Batch: 15 +2025-06-27 14:59:03,653 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,653 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,653 - INFO - points_sample_0.shape: 
torch.Size([3, 10000]) +2025-06-27 14:59:03,653 - INFO - The first 10 points in x_coor for the sample_0: tensor([1.6354, 2.7358, 3.0108, 2.9425, 0.5582, 3.3426, 1.6927, 2.6554, 1.1885, + 2.9646]) +2025-06-27 14:59:03,654 - INFO - The first 10 points in x_coor for the sample_1: tensor([0.7073, 2.6755, 0.4456, 0.2009, 3.9422, 2.2771, 2.5980, 2.0823, 1.0969, + 0.8215]) +2025-06-27 14:59:03,654 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,654 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,654 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,654 - INFO - The first 10 points pressure for the sample_0: tensor([ -53.3478, -249.1540, -217.1740, -38.3287, -57.3577, -39.5899, + -53.4930, -198.4300, -62.9154, -72.7477]) +2025-06-27 14:59:03,655 - INFO - The first 10 points pressure for the sample_1: tensor([ -69.7244, -171.5290, 77.3724, -124.6980, -16.5869, -37.1627, + -110.4390, -50.8352, -93.0625, 35.6096]) +2025-06-27 14:59:03,662 - INFO - Batch: 16 +2025-06-27 14:59:03,662 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,662 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,662 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,663 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.0866, 3.0906, 2.8814, 3.3540, 0.1802, 3.5031, -0.4595, 2.9398, + -0.6862, 2.8402]) +2025-06-27 14:59:03,663 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 1.3375, 2.6600, 0.9213, -0.2852, 3.8668, 3.6532, 0.0999, -0.0257, + -0.4694, 3.0822]) +2025-06-27 14:59:03,663 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,663 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,663 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,663 - INFO - The first 10 points pressure for the sample_0: tensor([-139.3840, -76.1815, 
-77.5190, -45.6850, -198.1980, 18.8768, + -231.3870, -84.9075, 368.2710, -40.0785]) +2025-06-27 14:59:03,664 - INFO - The first 10 points pressure for the sample_1: tensor([ -64.3042, -57.6938, -522.9560, 408.5530, -19.7023, -25.3357, + -126.2210, -57.5550, -155.8810, 95.9221]) +2025-06-27 14:59:03,674 - INFO - Batch: 17 +2025-06-27 14:59:03,674 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,674 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,674 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,675 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.1838, 1.5781, -0.1521, 0.9625, 1.6583, 0.0083, -0.0349, 3.1378, + 2.3802, 0.3751]) +2025-06-27 14:59:03,675 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 1.6692, 3.0700, 3.6759, 3.8323, -0.6323, 0.4708, -0.5861, 2.3115, + -0.5990, -0.3932]) +2025-06-27 14:59:03,675 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,675 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,675 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,676 - INFO - The first 10 points pressure for the sample_0: tensor([-211.8480, -64.9082, -183.0390, -291.3150, -68.5615, -128.2960, + -151.9780, -87.9528, -53.8223, 52.7714]) +2025-06-27 14:59:03,676 - INFO - The first 10 points pressure for the sample_1: tensor([-188.8770, -90.7600, -102.9600, -74.7353, -238.1230, 93.8826, + -216.2530, -112.1910, 4.2031, -268.9180]) +2025-06-27 14:59:03,683 - INFO - Batch: 18 +2025-06-27 14:59:03,683 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,683 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,683 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,684 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.4499, 3.2238, 0.0198, -0.8947, 0.1846, 0.3769, 2.5953, 
0.1344, + 2.6005, 2.5104]) +2025-06-27 14:59:03,684 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.6021, -0.4711, 0.2979, -0.4244, 0.1350, 3.0414, -0.7052, 2.4948, + 0.1110, 0.1914]) +2025-06-27 14:59:03,684 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,684 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,684 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,685 - INFO - The first 10 points pressure for the sample_0: tensor([ -76.2705, -109.5390, -188.3260, 252.8330, -176.8290, -152.6550, + -65.6192, -174.7490, -241.6180, -97.7168]) +2025-06-27 14:59:03,685 - INFO - The first 10 points pressure for the sample_1: tensor([ -18.4898, -178.5000, -314.8540, -146.0690, -120.9740, -48.0461, + 289.0650, -100.6570, -146.2480, -62.3305]) +2025-06-27 14:59:03,693 - INFO - Batch: 19 +2025-06-27 14:59:03,693 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,693 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,693 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,694 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.6556, 0.7761, 2.6521, -0.1636, 3.1249, -0.1745, 2.7500, 3.6731, + 3.3997, 2.8615]) +2025-06-27 14:59:03,694 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.1495, -0.2890, -0.9955, 0.1877, 2.8025, 2.8040, -0.9255, 3.0113, + 2.6665, 0.9294]) +2025-06-27 14:59:03,694 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,694 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,694 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,695 - INFO - The first 10 points pressure for the sample_0: tensor([ -90.2428, -87.7525, -174.1110, -156.8580, -88.2660, -195.9300, + -256.0310, -8.6092, -122.2230, -52.9257]) +2025-06-27 14:59:03,695 - INFO - The first 10 points pressure for the 
sample_1: tensor([ -55.0505, -122.1260, 396.7610, -90.2323, -152.7490, -82.5651, + 328.6670, -427.7260, -100.4770, -27.4212]) +2025-06-27 14:59:03,703 - INFO - Batch: 20 +2025-06-27 14:59:03,703 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,703 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,703 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,704 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.7703, 3.6288, -0.0146, 0.1477, 2.8393, 2.8385, 1.3833, 3.0913, + 2.8498, -0.1425]) +2025-06-27 14:59:03,705 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.3972, 3.9183, 3.2692, 3.7785, -0.1782, 2.5976, -0.3481, 3.4233, + 3.6062, 0.2469]) +2025-06-27 14:59:03,705 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,705 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,705 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,705 - INFO - The first 10 points pressure for the sample_0: tensor([ -83.3786, -22.5281, -175.6870, -139.1320, -77.1722, -71.0416, + -81.8121, -42.6166, -128.7640, -126.2550]) +2025-06-27 14:59:03,705 - INFO - The first 10 points pressure for the sample_1: tensor([-201.5640, -16.7136, -106.6490, -42.4794, -175.8160, -8.5266, + -134.5120, -136.8020, -58.7743, -206.3970]) +2025-06-27 14:59:03,713 - INFO - Batch: 21 +2025-06-27 14:59:03,713 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,713 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,714 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,714 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.0842, 2.7824, -0.2797, 2.4688, 0.8248, 1.1311, -0.4703, 0.1026, + 2.5978, 1.4177]) +2025-06-27 14:59:03,715 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.4264, 2.8696, 1.6240, 2.9751, 
2.6334, 0.5618, 0.1458, 0.3647, + -0.0344, 0.9135]) +2025-06-27 14:59:03,715 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,715 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,715 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,715 - INFO - The first 10 points pressure for the sample_0: tensor([-185.7600, -93.9326, -159.4260, -93.3238, 33.1694, -555.6100, + -83.0041, 6.0518, -82.7327, -174.1910]) +2025-06-27 14:59:03,715 - INFO - The first 10 points pressure for the sample_1: tensor([ -98.1512, -98.8278, -70.9374, -85.9046, -191.0780, 112.1150, + -187.3720, -214.2010, -247.4480, -105.1760]) +2025-06-27 14:59:03,723 - INFO - Batch: 22 +2025-06-27 14:59:03,724 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,724 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,724 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,724 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.2349, 0.9135, -0.1889, 0.2861, 1.0193, 1.2458, -0.4257, 0.1707, + 2.7322, 2.7668]) +2025-06-27 14:59:03,725 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.9975, -0.4127, 3.8528, 2.7812, 0.0457, 1.7387, -0.1865, 2.7813, + 0.2707, -0.6109]) +2025-06-27 14:59:03,725 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,725 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,725 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,725 - INFO - The first 10 points pressure for the sample_0: tensor([ 132.8470, -74.3528, -155.9420, -174.4330, -338.2300, -73.8055, + -185.3560, -112.9740, -96.7461, -106.6330]) +2025-06-27 14:59:03,726 - INFO - The first 10 points pressure for the sample_1: tensor([-373.3540, -141.6460, 25.1866, -86.2460, -147.9350, -224.0500, + -198.4290, -63.9653, 87.9137, -98.6002]) +2025-06-27 14:59:03,741 - INFO - 
Batch: 23 +2025-06-27 14:59:03,741 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,741 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,741 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,742 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.4362, 2.1394, -0.8130, -0.2289, -0.2236, 3.2831, -0.0146, -0.4587, + 1.9986, 2.7975]) +2025-06-27 14:59:03,743 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.2382, 2.4836, 0.0085, 1.8302, 2.6476, 3.4547, 2.7562, -0.8593, + 3.8241, 0.0800]) +2025-06-27 14:59:03,743 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,743 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,743 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,743 - INFO - The first 10 points pressure for the sample_0: tensor([-285.9120, -143.0830, 447.8620, -818.7190, -206.1260, -203.6470, + -181.9760, -92.8544, -55.5424, -15.3260]) +2025-06-27 14:59:03,744 - INFO - The first 10 points pressure for the sample_1: tensor([ -97.2788, -92.2684, -135.6480, -96.3057, -67.3315, -145.7940, + -66.4935, 439.0400, -4.7178, -107.6100]) +2025-06-27 14:59:03,751 - INFO - Batch: 24 +2025-06-27 14:59:03,751 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,751 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,751 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,751 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.6553, -0.2362, 3.2621, 3.5150, 3.2970, -0.3015, 1.3146, 3.5571, + 2.3335, 0.9049]) +2025-06-27 14:59:03,752 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.6814, 2.5220, 2.0823, 3.5908, 0.2836, 0.8439, 3.6716, -0.1743, + 2.4019, 2.6665]) +2025-06-27 14:59:03,752 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,752 - 
INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,752 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,752 - INFO - The first 10 points pressure for the sample_0: tensor([ -74.1879, -109.2970, -83.0217, -42.2152, -95.3424, -155.8450, + -83.9559, -30.1028, -67.7424, -12.2910]) +2025-06-27 14:59:03,753 - INFO - The first 10 points pressure for the sample_1: tensor([ -7.6114, -59.0061, -47.3247, -30.2943, -39.4249, -351.7790, + -19.8623, -195.5630, -50.2555, -120.2700]) +2025-06-27 14:59:03,760 - INFO - Batch: 25 +2025-06-27 14:59:03,760 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,760 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,761 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,761 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.0024, 2.6824, -0.5860, 3.1021, 0.9962, -0.3762, 0.0654, -0.5734, + 0.3521, -0.9314]) +2025-06-27 14:59:03,761 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 1.0195, 3.0786, -0.9305, 3.6379, -0.0828, 1.6584, 0.9161, 0.9135, + -0.0261, 0.5813]) +2025-06-27 14:59:03,761 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,761 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,761 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,762 - INFO - The first 10 points pressure for the sample_0: tensor([-8.8256e+01, -8.6504e+01, -3.2214e+02, -3.3256e+02, -5.7983e+02, + -8.2140e+01, -1.0871e+02, 4.1819e-02, -1.4809e+02, 2.5899e+02]) +2025-06-27 14:59:03,762 - INFO - The first 10 points pressure for the sample_1: tensor([ -91.5531, -88.4385, 420.0560, -11.4785, -138.2240, -229.8700, + -425.4430, -91.2196, -150.3880, -118.3980]) +2025-06-27 14:59:03,773 - INFO - Batch: 26 +2025-06-27 14:59:03,773 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,773 - INFO - 
points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,773 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,773 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 3.2496, 0.8104, -0.5261, -0.2329, 0.4323, 3.6518, 3.7327, 1.9652, + -0.4947, -0.5774]) +2025-06-27 14:59:03,774 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.8151, 0.9867, 0.0400, 0.2850, 0.4326, 3.5833, -0.0836, -0.7234, + -0.7741, 1.5676]) +2025-06-27 14:59:03,774 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,774 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,774 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,775 - INFO - The first 10 points pressure for the sample_0: tensor([ 32.1262, -80.0883, -123.9470, -195.3070, -100.2010, 29.2670, + -16.8960, -86.8586, 3.6613, 54.4899]) +2025-06-27 14:59:03,775 - INFO - The first 10 points pressure for the sample_1: tensor([ -59.2862, -21.4684, -132.8340, -111.0530, -52.8657, 50.1366, + -194.1660, 113.6450, 358.9430, -226.3790]) +2025-06-27 14:59:03,781 - INFO - Batch: 27 +2025-06-27 14:59:03,781 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,781 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,781 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,781 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.3280, 0.9479, 0.6844, 0.5470, -0.5607, 0.8677, 2.4142, 1.2802, + 0.9326, 2.3344]) +2025-06-27 14:59:03,782 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.4468, 2.4833, 2.7102, 3.8827, 3.1250, 1.1427, 1.0396, -0.1865, + 2.7696, 0.5240]) +2025-06-27 14:59:03,782 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,782 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,782 - INFO - pressure_sample_0.shape: torch.Size([10000]) 
+2025-06-27 14:59:03,782 - INFO - The first 10 points pressure for the sample_0: tensor([-172.0560, -57.6567, -102.8640, -106.8040, -207.4080, -38.4566, + -56.6369, -76.8144, -486.2720, -36.1637]) +2025-06-27 14:59:03,783 - INFO - The first 10 points pressure for the sample_1: tensor([ 16.2851, -143.7690, -95.7955, -35.1342, -63.5244, -93.4655, + -68.2843, -181.9770, -75.4459, -66.5618]) +2025-06-27 14:59:03,790 - INFO - Batch: 28 +2025-06-27 14:59:03,790 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,790 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,790 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,791 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.1690, 3.0182, -0.3117, 1.0167, 0.8448, 0.3865, -0.6800, 0.1000, + -0.8942, 2.8359]) +2025-06-27 14:59:03,791 - INFO - The first 10 points in x_coor for the sample_1: tensor([-3.1358e-03, 3.7210e+00, 3.7106e+00, 1.5323e+00, -1.1745e-01, + 1.4169e+00, 2.9957e+00, -2.5156e-02, -2.6220e-02, 1.2272e-01]) +2025-06-27 14:59:03,791 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,791 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,791 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,791 - INFO - The first 10 points pressure for the sample_0: tensor([ -71.8076, -28.2225, -180.3700, -69.3480, -93.5420, -103.9380, + -31.1451, -282.2930, -208.7450, -186.7030]) +2025-06-27 14:59:03,792 - INFO - The first 10 points pressure for the sample_1: tensor([-149.8820, -47.8371, -30.8754, -61.2213, -179.6550, -269.8940, + -65.7033, -205.9630, -144.7190, -148.6480]) +2025-06-27 14:59:03,799 - INFO - Batch: 29 +2025-06-27 14:59:03,799 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,799 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,799 - INFO - points_sample_0.shape: torch.Size([3, 
10000]) +2025-06-27 14:59:03,799 - INFO - The first 10 points in x_coor for the sample_0: tensor([0.1459, 1.8531, 0.2792, 1.1312, 0.3291, 1.7615, 0.3698, 0.2260, 2.2655, + 0.0992]) +2025-06-27 14:59:03,800 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.2671, 0.0199, 0.9527, 1.0279, 3.4555, 1.6812, 2.3703, -0.1861, + 3.2626, 0.0304]) +2025-06-27 14:59:03,800 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,800 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,800 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,801 - INFO - The first 10 points pressure for the sample_0: tensor([-176.4800, -56.3783, -66.4860, -107.8410, -189.6050, -66.6120, + -43.6896, -199.4750, -179.7610, -173.2540]) +2025-06-27 14:59:03,801 - INFO - The first 10 points pressure for the sample_1: tensor([-137.7900, -144.6510, -590.5480, -509.6860, -54.0269, -106.7610, + -76.1703, -273.7640, -135.9330, -154.3830]) +2025-06-27 14:59:03,807 - INFO - Batch: 30 +2025-06-27 14:59:03,807 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,807 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,807 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,807 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.9479, 0.3987, -0.1521, 1.9906, -0.0118, -0.5948, 2.6331, 0.5215, + 2.7021, 0.5607]) +2025-06-27 14:59:03,808 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.5999, 1.7842, 3.0147, 2.8585, 2.1510, 1.4178, -0.8385, 0.4896, + 3.3658, 2.4531]) +2025-06-27 14:59:03,808 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,808 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,808 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,808 - INFO - The first 10 points pressure for the sample_0: tensor([ -50.8184, 107.2220, 
-172.7260, -58.7541, -100.8550, -93.4919, + -255.4690, -141.1370, -97.2591, 154.2220]) +2025-06-27 14:59:03,808 - INFO - The first 10 points pressure for the sample_1: tensor([ -92.1932, -75.5878, -113.2290, -145.7610, -83.5426, -248.8180, + 267.4870, -147.7300, -47.9468, -80.9790]) +2025-06-27 14:59:03,815 - INFO - Batch: 31 +2025-06-27 14:59:03,815 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,815 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,816 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,816 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.4635, -0.1174, 0.6167, -0.9828, 1.6812, 1.0847, 1.7729, -0.1520, + 2.5627, 3.0302]) +2025-06-27 14:59:03,816 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.3752, 0.2724, -0.4835, 0.9250, 3.6627, 2.8386, 0.8557, 3.9218, + 3.1210, -0.3901]) +2025-06-27 14:59:03,816 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,816 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,816 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,817 - INFO - The first 10 points pressure for the sample_0: tensor([ -95.1635, -107.9970, 137.8990, -377.8830, -94.7552, -76.7119, + -94.5342, -96.3450, -94.3630, -47.7246]) +2025-06-27 14:59:03,817 - INFO - The first 10 points pressure for the sample_1: tensor([ 26.8221, -139.6480, -68.4088, -78.4813, -92.9359, -102.2890, + -458.8600, -51.0210, -36.5807, -119.5220]) +2025-06-27 14:59:03,824 - INFO - Batch: 32 +2025-06-27 14:59:03,824 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,824 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,824 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,824 - INFO - The first 10 points in x_coor for the sample_0: tensor([2.6552, 2.8621, 3.0570, 2.2725, 2.7130, 3.0448, 0.1232, 
2.8604, 3.6765, + 0.7417]) +2025-06-27 14:59:03,824 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.7915, 0.4004, 0.1917, 3.7563, -0.1262, 2.2084, 3.2492, 1.6240, + -0.9586, 0.3788]) +2025-06-27 14:59:03,825 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,825 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,825 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,825 - INFO - The first 10 points pressure for the sample_0: tensor([-142.5580, -140.2040, -94.3418, -85.5900, -165.3290, -8.9998, + -434.2130, 24.8030, -15.0267, -87.2577]) +2025-06-27 14:59:03,826 - INFO - The first 10 points pressure for the sample_1: tensor([ -33.3455, 82.1770, -196.9200, -72.2047, -49.2621, -23.6994, + -41.9599, -72.0221, 442.1610, -72.5108]) +2025-06-27 14:59:03,950 - INFO - Batch: 33 +2025-06-27 14:59:03,951 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,951 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,951 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,951 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.3813, -0.6180, 2.7117, -0.0955, 2.3878, 1.8990, -0.2204, -0.1854, + 2.4366, -0.0807]) +2025-06-27 14:59:03,952 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 1.4519, 0.0771, 0.2691, -0.4843, 2.0249, 3.3312, 3.5800, 2.7106, + 2.6875, -0.5732]) +2025-06-27 14:59:03,952 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,952 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,952 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,952 - INFO - The first 10 points pressure for the sample_0: tensor([-334.7100, -208.6040, -78.3032, -196.7590, -80.4406, -74.0066, + -64.5444, -148.8190, -204.1940, -21.9310]) +2025-06-27 14:59:03,952 - INFO - The first 10 points pressure for the sample_1: 
tensor([ -52.1155, -159.1580, -123.9140, -49.3530, -202.4540, 45.0232, + -26.4776, -63.7452, -51.6638, -429.7930]) +2025-06-27 14:59:03,960 - INFO - Batch: 34 +2025-06-27 14:59:03,960 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,960 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,961 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,961 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.6435, 3.1134, 1.0625, 1.1242, 0.7875, 0.9040, 2.9080, -0.8353, + -0.2323, 2.7696]) +2025-06-27 14:59:03,961 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.1132, 1.8528, 3.9148, -0.2349, 3.9169, 1.4979, 1.2687, 0.0887, + 1.0622, 3.4802]) +2025-06-27 14:59:03,961 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,961 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,961 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,962 - INFO - The first 10 points pressure for the sample_0: tensor([-122.1790, -83.9981, -116.2120, -243.7820, -167.9570, -46.7163, + -56.8178, 326.6480, -345.4950, -143.3180]) +2025-06-27 14:59:03,962 - INFO - The first 10 points pressure for the sample_1: tensor([ -90.0224, -93.3419, -30.0798, -173.1290, -79.8611, -52.6971, + -87.8026, -31.6445, -235.4820, 70.6987]) +2025-06-27 14:59:03,968 - INFO - Batch: 35 +2025-06-27 14:59:03,968 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,968 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,968 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,969 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.8641, 0.8677, 1.8042, 0.1116, -0.8959, 3.5810, 0.5146, 3.5696, + 1.7614, 2.7114]) +2025-06-27 14:59:03,969 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.0964, 2.4975, 2.4031, 1.5668, -0.8922, 3.7699, 
1.2573, 0.2626, + 0.5347, -0.2470]) +2025-06-27 14:59:03,969 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,969 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,969 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,970 - INFO - The first 10 points pressure for the sample_0: tensor([-111.9880, -129.0600, -73.4888, -137.5430, 341.9980, -36.2054, + 107.2540, -18.7434, -79.0948, -57.4639]) +2025-06-27 14:59:03,970 - INFO - The first 10 points pressure for the sample_1: tensor([-118.6720, -88.8445, -147.5880, -248.5680, 302.8140, 0.5286, + -86.7859, -110.4070, 12.7982, -160.6780]) +2025-06-27 14:59:03,977 - INFO - Batch: 36 +2025-06-27 14:59:03,977 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,977 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,977 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,978 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.1916, -0.3633, 0.0083, 0.6492, 1.6015, 3.7575, 2.5296, 2.8156, + 2.3077, 2.9862]) +2025-06-27 14:59:03,978 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.5868, 0.0720, 3.6466, 2.0823, -0.1516, 3.5156, 1.3031, 1.5552, + 2.6781, 2.8837]) +2025-06-27 14:59:03,978 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,978 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,978 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,979 - INFO - The first 10 points pressure for the sample_0: tensor([ -13.6639, -159.2940, -155.5510, 184.0170, -97.3904, -22.4263, + -407.6130, -40.0012, -152.1620, -24.3921]) +2025-06-27 14:59:03,979 - INFO - The first 10 points pressure for the sample_1: tensor([ -96.5537, -158.0490, -23.8407, -73.7478, -86.8836, 4.4890, + -81.7787, -66.9959, -205.5900, -51.5440]) +2025-06-27 14:59:03,987 - INFO - Batch: 37 +2025-06-27 
14:59:03,987 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,987 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,988 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,988 - INFO - The first 10 points in x_coor for the sample_0: tensor([2.6894, 0.8658, 1.3604, 1.4868, 3.7887, 0.5710, 0.5009, 2.9438, 2.0263, + 3.3437]) +2025-06-27 14:59:03,989 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.0557, 0.5470, -0.3219, -0.7490, -0.5760, 2.0250, 1.9906, -0.1979, + 0.0991, -0.6017]) +2025-06-27 14:59:03,989 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,989 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:03,989 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,989 - INFO - The first 10 points pressure for the sample_0: tensor([-103.9700, 370.8800, -77.0428, -54.8544, -41.9449, 105.4260, + -92.5658, -44.6539, -78.3615, -40.8942]) +2025-06-27 14:59:03,989 - INFO - The first 10 points pressure for the sample_1: tensor([ -53.7789, -136.5090, -105.3350, 409.5120, -203.6270, -100.4900, + -98.7160, -204.2220, -43.8484, 83.2925]) +2025-06-27 14:59:03,996 - INFO - Batch: 38 +2025-06-27 14:59:03,997 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:03,997 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:03,997 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:03,997 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.3117, 0.4094, 0.0427, 3.1248, 2.7575, 2.6631, 0.5927, 2.8988, + 1.4521, 1.1771]) +2025-06-27 14:59:03,997 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.3688, -0.0817, 1.5323, 1.9906, 3.0661, 0.7188, 2.2313, 2.3115, + 3.2123, 0.4782]) +2025-06-27 14:59:03,998 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:03,998 - INFO - pressure_sample_0.shape: 
torch.Size([1, 10000]) +2025-06-27 14:59:03,998 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:03,998 - INFO - The first 10 points pressure for the sample_0: tensor([-268.5310, -283.7720, -251.0730, -140.5760, -87.6902, -242.1250, + -80.3776, -84.8839, -90.4834, -90.0195]) +2025-06-27 14:59:03,998 - INFO - The first 10 points pressure for the sample_1: tensor([-126.9870, -168.7350, -169.6220, -70.4165, -45.4687, -118.2910, + -72.3663, -65.5506, -84.8359, -71.2502]) +2025-06-27 14:59:04,004 - INFO - Batch: 39 +2025-06-27 14:59:04,005 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:04,005 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:04,005 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:04,005 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.6710, 1.9803, 1.3950, 2.8383, 0.8219, 1.8760, 0.0427, 0.6495, + 0.3333, 2.8042]) +2025-06-27 14:59:04,005 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.3143, 2.9865, 2.9771, 2.9417, 1.0311, 3.5053, 1.4050, 0.1797, + 2.7006, -0.0968]) +2025-06-27 14:59:04,006 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:04,006 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:04,006 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:04,006 - INFO - The first 10 points pressure for the sample_0: tensor([ -95.5753, -94.2381, -310.8570, -109.1800, -81.4626, -70.8335, + -153.2640, -75.0069, 33.6827, -70.6522]) +2025-06-27 14:59:04,006 - INFO - The first 10 points pressure for the sample_1: tensor([-175.4440, -149.5730, -123.9650, -90.3130, -144.0810, -50.9281, + -253.8000, -57.1164, -173.6610, -177.1350]) +2025-06-27 14:59:04,016 - INFO - Batch: 40 +2025-06-27 14:59:04,016 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:04,016 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 
14:59:04,016 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:04,017 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.7469, 2.7587, 0.7073, 2.7011, 3.4110, 2.1052, 2.5066, 3.0369, + -0.5161, 2.3114]) +2025-06-27 14:59:04,017 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.4209, 0.4208, 1.7844, -0.3690, 0.4459, 1.6354, 3.5461, 2.6436, + 1.8417, 1.7042]) +2025-06-27 14:59:04,017 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:04,017 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:04,017 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:04,017 - INFO - The first 10 points pressure for the sample_0: tensor([ -71.5105, -83.9507, -115.8100, -88.5756, -51.6981, -55.6062, + -33.3012, -96.3630, -220.3480, -136.4290]) +2025-06-27 14:59:04,018 - INFO - The first 10 points pressure for the sample_1: tensor([ -49.1153, -110.7010, -76.5956, -86.4747, 126.2600, -100.9660, + -14.0673, -63.5962, -83.5589, -76.9001]) +2025-06-27 14:59:04,024 - INFO - Batch: 41 +2025-06-27 14:59:04,024 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 14:59:04,024 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 14:59:04,024 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 14:59:04,025 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 3.1197, -0.7715, 4.0171, 3.3372, 0.6860, 1.1312, 0.2249, -0.3013, + 4.0153, 3.0571]) +2025-06-27 14:59:04,025 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.2488, -0.1749, -0.5541, 3.5946, 3.1490, -0.4508, 2.4746, 1.0854, + 1.8416, 1.2000]) +2025-06-27 14:59:04,025 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 14:59:04,025 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 14:59:04,025 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 14:59:04,025 - INFO - The first 10 points 
pressure for the sample_0: tensor([-139.8260, 26.5946, -19.0641, -114.9630, 187.9530, -74.7365, + 26.5352, -145.7950, -19.5303, -161.1850]) +2025-06-27 14:59:04,026 - INFO - The first 10 points pressure for the sample_1: tensor([ 43.5840, -199.3970, -144.1050, 10.0279, -68.8296, -107.1940, + -66.7033, -66.0018, -68.5476, -84.6221]) +2025-06-27 14:59:04,901 - INFO - Type of train_subset: +2025-06-27 14:59:04,901 - INFO - Number of samples of train_subset : 85 +2025-06-27 14:59:04,901 - INFO - Subset indices: [0, 1, 2, 4, 6] +2025-06-27 14:59:04,901 - INFO - List the train_subset vtk files: +2025-06-27 14:59:04,901 - INFO - 0: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_073.vtk +2025-06-27 14:59:04,901 - INFO - 1: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_051.vtk +2025-06-27 14:59:04,901 - INFO - 2: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_099.vtk +2025-06-27 14:59:04,901 - INFO - 3: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_164.vtk +2025-06-27 14:59:04,901 - INFO - 4: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_030.vtk +2025-06-27 14:59:04,901 - INFO - 5: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_132.vtk +2025-06-27 14:59:04,902 - INFO - 6: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_107.vtk +2025-06-27 14:59:04,902 - INFO - 7: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_186.vtk +2025-06-27 14:59:04,902 - INFO - 8: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_079.vtk +2025-06-27 14:59:04,902 - INFO - 9: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_018.vtk +2025-06-27 14:59:04,902 - INFO - 10: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_015.vtk +2025-06-27 14:59:04,902 - INFO - 11: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_191.vtk +2025-06-27 14:59:04,902 - INFO - 12: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_084.vtk +2025-06-27 14:59:04,902 - INFO - 13: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_129.vtk +2025-06-27 14:59:04,902 - INFO - 14: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_080.vtk +2025-06-27 14:59:04,902 - INFO - 15: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_010.vtk +2025-06-27 14:59:04,902 - INFO - 16: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_094.vtk +2025-06-27 14:59:04,902 - INFO - 17: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_035.vtk +2025-06-27 14:59:04,902 - INFO - 18: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk +2025-06-27 14:59:04,902 - INFO - 19: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_142.vtk +2025-06-27 14:59:04,902 - INFO - 20: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk +2025-06-27 14:59:04,902 - INFO - 21: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_024.vtk +2025-06-27 14:59:04,902 - INFO - 22: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_106.vtk +2025-06-27 14:59:04,902 - INFO - 23: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_161.vtk +2025-06-27 14:59:04,902 - INFO - 24: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_171.vtk +2025-06-27 14:59:04,902 - INFO - 25: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_156.vtk +2025-06-27 14:59:04,903 - INFO - 26: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_053.vtk +2025-06-27 14:59:04,903 - INFO - 27: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_059.vtk +2025-06-27 14:59:04,903 - INFO - 28: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_082.vtk +2025-06-27 14:59:04,903 - INFO - 29: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_109.vtk +2025-06-27 14:59:04,903 - INFO - 30: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_139.vtk +2025-06-27 14:59:04,903 - INFO - 31: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_055.vtk +2025-06-27 14:59:04,903 - INFO - 32: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_088.vtk +2025-06-27 14:59:04,903 - INFO - 33: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_174.vtk +2025-06-27 14:59:04,903 - INFO - 34: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_104.vtk +2025-06-27 14:59:04,903 - INFO - 35: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_014.vtk +2025-06-27 14:59:04,903 - INFO - 36: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_072.vtk +2025-06-27 14:59:04,903 - INFO - 37: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_146.vtk +2025-06-27 14:59:04,904 - INFO - 38: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_194.vtk +2025-06-27 14:59:04,904 - INFO - 39: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_066.vtk +2025-06-27 14:59:04,904 - INFO - 40: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_067.vtk +2025-06-27 14:59:04,904 - INFO - 41: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_130.vtk +2025-06-27 14:59:04,904 - INFO - 42: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_131.vtk +2025-06-27 14:59:04,904 - INFO - 43: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_182.vtk +2025-06-27 14:59:04,904 - INFO - 44: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_049.vtk +2025-06-27 14:59:04,904 - INFO - 45: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_022.vtk +2025-06-27 14:59:04,904 - INFO - 46: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_042.vtk +2025-06-27 14:59:04,904 - INFO - 47: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_036.vtk +2025-06-27 14:59:04,904 - INFO - 48: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_137.vtk +2025-06-27 14:59:04,904 - INFO - 49: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_187.vtk +2025-06-27 14:59:04,904 - INFO - 50: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_195.vtk +2025-06-27 14:59:04,904 - INFO - 51: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_180.vtk +2025-06-27 14:59:04,904 - INFO - 52: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_119.vtk +2025-06-27 14:59:04,905 - INFO - 53: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_169.vtk +2025-06-27 14:59:04,905 - INFO - 54: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_092.vtk +2025-06-27 14:59:04,905 - INFO - 55: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk +2025-06-27 14:59:04,905 - INFO - 56: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_021.vtk +2025-06-27 14:59:04,905 - INFO - 57: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_192.vtk +2025-06-27 14:59:04,905 - INFO - 58: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_100.vtk +2025-06-27 14:59:04,905 - INFO - 59: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_108.vtk +2025-06-27 14:59:04,905 - INFO - 60: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_093.vtk +2025-06-27 14:59:04,905 - INFO - 61: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_149.vtk +2025-06-27 14:59:04,905 - INFO - 62: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_060.vtk +2025-06-27 14:59:04,905 - INFO - 63: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_027.vtk +2025-06-27 14:59:04,905 - INFO - 64: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_061.vtk +2025-06-27 14:59:04,905 - INFO - 65: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_096.vtk +2025-06-27 14:59:04,905 - INFO - 66: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_039.vtk +2025-06-27 14:59:04,905 - INFO - 67: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_118.vtk +2025-06-27 14:59:04,905 - INFO - 68: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_122.vtk +2025-06-27 14:59:04,905 - INFO - 69: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_153.vtk +2025-06-27 14:59:04,905 - INFO - 70: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_144.vtk +2025-06-27 14:59:04,905 - INFO - 71: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_138.vtk +2025-06-27 14:59:04,905 - INFO - 72: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_068.vtk +2025-06-27 14:59:04,905 - INFO - 73: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_115.vtk +2025-06-27 14:59:04,905 - INFO - 74: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_028.vtk +2025-06-27 14:59:04,905 - INFO - 75: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_136.vtk +2025-06-27 14:59:04,905 - INFO - 76: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_085.vtk +2025-06-27 14:59:04,905 - INFO - 77: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_097.vtk +2025-06-27 14:59:04,906 - INFO - 78: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_140.vtk +2025-06-27 14:59:04,906 - INFO - 79: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_190.vtk +2025-06-27 14:59:04,906 - INFO - 80: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_150.vtk +2025-06-27 14:59:04,906 - INFO - 81: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_170.vtk +2025-06-27 14:59:04,906 - INFO - 82: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_095.vtk +2025-06-27 14:59:04,906 - INFO - 83: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_071.vtk +2025-06-27 14:59:04,906 - INFO - 84: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_155.vtk +2025-06-27 14:59:04,906 - INFO - Type of full_dataset: +2025-06-27 14:59:04,906 - INFO - Number of samples of full_dataset: 130 +2025-06-27 14:59:04,906 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_073.vtk: 0 +2025-06-27 14:59:04,907 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_051.vtk: 1 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_099.vtk: 2 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_009.vtk: 3 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_164.vtk: 4 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_166.vtk: 5 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_030.vtk: 6 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_132.vtk: 7 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_114.vtk: 8 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_107.vtk: 9 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_186.vtk: 10 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_175.vtk: 11 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_079.vtk: 12 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_018.vtk: 13 +2025-06-27 14:59:04,907 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_015.vtk: 14 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_191.vtk: 15 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_084.vtk: 16 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_032.vtk: 17 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_129.vtk: 18 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_080.vtk: 19 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_010.vtk: 20 +2025-06-27 14:59:04,907 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_094.vtk: 21 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_078.vtk: 22 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_035.vtk: 23 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk: 24 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_020.vtk: 25 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_142.vtk: 26 +2025-06-27 14:59:04,908 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk: 27 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_112.vtk: 28 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_125.vtk: 29 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_024.vtk: 30 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_106.vtk: 31 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_161.vtk: 32 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_171.vtk: 33 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_156.vtk: 34 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_053.vtk: 35 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_160.vtk: 36 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_059.vtk: 37 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_082.vtk: 38 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_109.vtk: 39 +2025-06-27 14:59:04,908 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_026.vtk: 40 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_139.vtk: 41 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_055.vtk: 42 +2025-06-27 14:59:04,908 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_088.vtk: 43 +2025-06-27 14:59:04,909 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_174.vtk: 44 +2025-06-27 14:59:04,909 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_104.vtk: 45 +2025-06-27 14:59:04,909 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_168.vtk: 46 +2025-06-27 14:59:04,909 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_014.vtk: 47 +2025-06-27 14:59:04,909 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_072.vtk: 48 +2025-06-27 14:59:04,909 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_147.vtk: 49 +2025-06-27 14:59:04,909 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_146.vtk: 50 +2025-06-27 14:59:04,909 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_194.vtk: 51 +2025-06-27 14:59:04,909 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_141.vtk: 52 +2025-06-27 14:59:04,909 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_002.vtk: 53 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_066.vtk: 54 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_050.vtk: 55 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_067.vtk: 56 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_130.vtk: 57 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_025.vtk: 58 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_019.vtk: 59 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_131.vtk: 60 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_182.vtk: 61 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_049.vtk: 62 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_022.vtk: 63 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_042.vtk: 64 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_177.vtk: 65 +2025-06-27 14:59:04,910 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_036.vtk: 66 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_137.vtk: 67 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_187.vtk: 68 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_195.vtk: 69 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_180.vtk: 70 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_178.vtk: 71 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_119.vtk: 72 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_169.vtk: 73 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_189.vtk: 74 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_092.vtk: 75 +2025-06-27 14:59:04,910 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_116.vtk: 76 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk: 77 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_029.vtk: 78 +2025-06-27 14:59:04,911 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_021.vtk: 79 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_192.vtk: 80 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_100.vtk: 81 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_087.vtk: 82 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_108.vtk: 83 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_093.vtk: 84 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_034.vtk: 85 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_159.vtk: 86 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_089.vtk: 87 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_065.vtk: 88 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_149.vtk: 89 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_075.vtk: 90 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_060.vtk: 91 +2025-06-27 14:59:04,911 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_148.vtk: 92 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_027.vtk: 93 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_061.vtk: 94 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_096.vtk: 95 +2025-06-27 14:59:04,911 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_039.vtk: 96 +2025-06-27 14:59:04,912 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_058.vtk: 97 +2025-06-27 14:59:04,912 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_118.vtk: 98 +2025-06-27 14:59:04,912 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_091.vtk: 99 +2025-06-27 14:59:04,912 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_057.vtk: 100 +2025-06-27 14:59:04,912 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_122.vtk: 101 +2025-06-27 14:59:04,912 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_153.vtk: 102 +2025-06-27 14:59:04,912 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_144.vtk: 103 +2025-06-27 14:59:04,912 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_138.vtk: 104 +2025-06-27 14:59:04,912 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_068.vtk: 105 +2025-06-27 14:59:04,912 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_115.vtk: 106 +2025-06-27 14:59:04,912 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_135.vtk: 107 +2025-06-27 14:59:04,912 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_044.vtk: 108 +2025-06-27 14:59:04,912 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_028.vtk: 109 +2025-06-27 14:59:04,912 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_136.vtk: 110 +2025-06-27 14:59:04,912 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_085.vtk: 111 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_097.vtk: 112 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_121.vtk: 113 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_017.vtk: 114 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_140.vtk: 115 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_190.vtk: 116 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_023.vtk: 117 +2025-06-27 14:59:04,913 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_047.vtk: 118 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_150.vtk: 119 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_070.vtk: 120 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_170.vtk: 121 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_110.vtk: 122 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_081.vtk: 123 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_172.vtk: 124 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_095.vtk: 125 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_071.vtk: 126 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_117.vtk: 127 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_155.vtk: 128 +2025-06-27 14:59:04,913 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_043.vtk: 129 +2025-06-27 14:59:04,913 - INFO - Data loaded: 42 training batches, 10 validation batches, 12 test batches +2025-06-27 14:59:04,916 - INFO - Staring training for 10 epochs +2025-06-27 14:59:40,988 - INFO - args.exp_name : Train_Test 
+2025-06-27 14:59:40,988 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 14:59:40,988 - INFO - Starting training with 1 GPUs +2025-06-27 14:59:57,654 - INFO - Total trainable parameters: 1437705 +2025-06-27 14:59:57,800 - INFO - Type of train_dataloader: +2025-06-27 14:59:57,801 - INFO - Number of train_dataloader: 42 +2025-06-27 14:59:57,801 - INFO - We can access the internal conetnt by dataloader: +2025-06-27 15:01:35,623 - INFO - args.exp_name : Train_Test +2025-06-27 15:01:35,624 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Cache_data', num_points=10000, batch_size=2, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 15:01:35,624 - INFO - Starting training with 1 GPUs +2025-06-27 15:01:38,313 - INFO - Total trainable parameters: 1437705 +2025-06-27 15:01:38,364 - INFO - Type of train_dataloader: +2025-06-27 15:01:38,364 - INFO - Number of train_dataloader: 42 +2025-06-27 15:01:38,364 - INFO - We can access the internal conetnt by dataloader: +2025-06-27 15:01:42,229 - INFO - Batch: 0 +2025-06-27 15:01:42,229 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,229 - INFO - points_sample_0.shape: torch.Size([1, 3, 
10000]) +2025-06-27 15:01:42,229 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,235 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.0485, -0.5382, 0.8928, -0.5284, 0.1444, 2.6208, 2.9762, 3.6200, + -0.3787, 1.3031]) +2025-06-27 15:01:42,235 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.2833, 2.6895, 2.9486, 1.6583, -0.7939, 2.0023, 3.1221, -0.4969, + 1.6010, 3.6084]) +2025-06-27 15:01:42,235 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,235 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,235 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,236 - INFO - The first 10 points pressure for the sample_0: tensor([-338.8030, -198.2250, -328.4480, -501.5070, -109.0920, -77.5971, + -91.8080, -29.4908, -284.0090, -91.4154]) +2025-06-27 15:01:42,236 - INFO - The first 10 points pressure for the sample_1: tensor([-184.4790, -81.4952, -111.3050, -65.2745, 262.2230, -241.2780, + -14.4769, -133.4740, -90.2832, -18.6696]) +2025-06-27 15:01:42,237 - INFO - Batch: 1 +2025-06-27 15:01:42,237 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,237 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,237 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,238 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.3513, 1.2264, 2.3647, 0.1573, -0.5166, 0.6042, 2.8162, 3.3206, + 2.9534, 3.4002]) +2025-06-27 15:01:42,238 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.3273, 2.5158, 1.0972, 3.1233, 2.9517, 0.5927, 1.0853, 2.0020, + 1.7042, 1.0168]) +2025-06-27 15:01:42,238 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,238 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,241 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,241 - INFO - The 
first 10 points pressure for the sample_0: tensor([-148.8720, -323.1080, -107.6920, -145.9240, -122.9890, -107.0480, + -73.7163, -52.5742, -81.2839, -54.2815]) +2025-06-27 15:01:42,243 - INFO - The first 10 points pressure for the sample_1: tensor([-162.1830, -182.7360, -78.5279, -28.9796, -74.6324, -160.5080, + -74.9221, -181.3020, -57.2525, -253.2530]) +2025-06-27 15:01:42,259 - INFO - Batch: 2 +2025-06-27 15:01:42,259 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,259 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,259 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,260 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.6993, 2.4030, 0.8899, 2.0135, 3.2732, 2.8507, -0.0716, 1.6125, + 2.9042, 0.3750]) +2025-06-27 15:01:42,260 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.1687, 0.1376, 1.5781, 2.6907, -0.8913, 3.2266, -0.2999, 1.4406, + 3.1044, 0.3440]) +2025-06-27 15:01:42,260 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,260 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,260 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,261 - INFO - The first 10 points pressure for the sample_0: tensor([-208.3020, -73.5788, -553.2300, -180.9300, -24.8334, -148.6590, + -183.7780, -86.4613, -121.7070, -128.1350]) +2025-06-27 15:01:42,261 - INFO - The first 10 points pressure for the sample_1: tensor([-165.9060, -261.3880, -70.9110, -124.4570, 266.7510, -80.9190, + -47.6652, -66.3573, -81.4829, -123.6120]) +2025-06-27 15:01:42,271 - INFO - Batch: 3 +2025-06-27 15:01:42,271 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,271 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,271 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,272 - INFO - The first 10 points in x_coor for the 
sample_0: tensor([ 1.6583, 2.0797, -0.6899, 2.9531, -0.5864, 2.2427, 1.3490, -0.5493, + -0.0602, -0.4740]) +2025-06-27 15:01:42,272 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.3746, 1.6812, 2.2774, 1.6584, 1.9345, 2.5699, -0.2872, 1.8989, + 0.3043, -0.3038]) +2025-06-27 15:01:42,273 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,273 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,273 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,273 - INFO - The first 10 points pressure for the sample_0: tensor([ -57.7625, -188.2780, -41.3928, -56.4165, -216.8250, -48.8867, + -71.5313, -197.1700, -160.1030, -137.5480]) +2025-06-27 15:01:42,273 - INFO - The first 10 points pressure for the sample_1: tensor([ -94.1245, -81.8525, -96.1851, -76.5446, -79.7430, -61.4083, + -213.5430, -109.7750, -182.5630, 287.5700]) +2025-06-27 15:01:42,295 - INFO - Batch: 4 +2025-06-27 15:01:42,295 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,295 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,295 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,296 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.2684, 0.2005, 2.4938, -0.8517, -0.1792, 1.2573, 3.1068, 3.0116, + 0.9479, 1.2227]) +2025-06-27 15:01:42,296 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.2343, 1.0350, -0.3204, 0.1797, 1.8875, 0.1917, -0.0378, 2.6067, + 2.1969, 1.3375]) +2025-06-27 15:01:42,296 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,296 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,296 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,297 - INFO - The first 10 points pressure for the sample_0: tensor([-173.1780, -202.8530, -80.9509, 327.1650, -128.7190, -326.8660, + -40.6500, -19.0033, -106.4500, 
-370.2060]) +2025-06-27 15:01:42,297 - INFO - The first 10 points pressure for the sample_1: tensor([-131.4480, -711.7170, -99.5009, 28.7482, -64.0691, -150.5970, + -155.0720, -118.3460, -41.2665, -54.1201]) +2025-06-27 15:01:42,305 - INFO - Batch: 5 +2025-06-27 15:01:42,305 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,305 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,305 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,306 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.5521, 2.6666, 0.5497, 0.1573, 2.8155, 2.9984, -0.1173, 2.5646, + -0.9318, 0.5025]) +2025-06-27 15:01:42,306 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.3631, 3.3885, 0.4810, -0.2440, 1.7385, -0.4245, -0.0946, -0.6986, + 2.2198, 2.6981]) +2025-06-27 15:01:42,306 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,306 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,307 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,307 - INFO - The first 10 points pressure for the sample_0: tensor([ -84.8091, -77.7356, 97.0457, -153.8820, -132.9120, -61.9000, + -132.3560, -76.8720, 436.0530, 81.3881]) +2025-06-27 15:01:42,307 - INFO - The first 10 points pressure for the sample_1: tensor([-102.3700, -15.2985, 80.3081, -129.9610, -95.3753, -90.2176, + -133.5760, 22.5415, -142.7650, -88.9344]) +2025-06-27 15:01:42,314 - INFO - Batch: 6 +2025-06-27 15:01:42,314 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,314 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,314 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,314 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.6010, 1.1225, 1.3490, 2.9752, 0.3980, 1.9333, -0.0454, 3.7736, + 0.4439, 2.0478]) +2025-06-27 15:01:42,314 - INFO - The first 10 points 
in x_coor for the sample_1: tensor([0.2603, 2.6326, 0.7668, 0.3881, 0.9500, 0.7333, 2.7462, 3.3095, 0.7302, + 1.3604]) +2025-06-27 15:01:42,315 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,315 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,315 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,315 - INFO - The first 10 points pressure for the sample_0: tensor([ -72.9790, -519.1740, -101.6530, -85.5379, 82.8267, -191.4840, + -87.2200, -25.2302, -84.1994, -76.7869]) +2025-06-27 15:01:42,315 - INFO - The first 10 points pressure for the sample_1: tensor([-356.8900, -64.7882, 63.8028, -26.0867, -324.4070, 48.0206, + -399.5770, -150.6780, -78.1401, -75.5552]) +2025-06-27 15:01:42,334 - INFO - Batch: 7 +2025-06-27 15:01:42,335 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,335 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,335 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,335 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.5431e+00, 3.2020e+00, 3.6874e+00, 1.5438e+00, 5.2393e-01, + 2.0708e+00, 3.3398e+00, 1.2687e+00, 3.7581e+00, -3.0862e-03]) +2025-06-27 15:01:42,336 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.3220, 0.0083, 3.5499, 2.8617, 1.1195, 3.0793, -0.1098, 2.8404, + 2.3572, 3.3744]) +2025-06-27 15:01:42,336 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,336 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,336 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,336 - INFO - The first 10 points pressure for the sample_0: tensor([ -41.2135, -13.3389, -55.2665, -199.9300, -107.1490, -49.8250, + -57.6707, -68.3031, -34.3428, -156.1310]) +2025-06-27 15:01:42,337 - INFO - The first 10 points pressure for the sample_1: tensor([-186.6740, -176.6460, 28.5065, -86.4264, 
-184.2440, -153.2230, + -161.5000, 55.2674, -68.8246, 50.5586]) +2025-06-27 15:01:42,345 - INFO - Batch: 8 +2025-06-27 15:01:42,345 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,345 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,345 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,346 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.2492, 0.2958, 0.0304, 0.3405, -0.1605, 2.1281, 0.0187, 1.7500, + 0.7300, 1.3520]) +2025-06-27 15:01:42,347 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.2083, 2.9885, 3.2353, 1.7729, 3.8352, 3.5629, 0.2343, -0.1897, + -0.1860, 0.8676]) +2025-06-27 15:01:42,347 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,347 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,347 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,347 - INFO - The first 10 points pressure for the sample_0: tensor([-399.0650, -154.6710, -195.3250, -35.4192, -752.6250, -166.1940, + -32.8515, -81.4531, 85.0086, -146.5700]) +2025-06-27 15:01:42,348 - INFO - The first 10 points pressure for the sample_1: tensor([ -85.3987, -75.0366, -75.1776, -76.2214, -25.3767, -48.0276, + -294.6700, -621.4550, -99.7808, -125.2340]) +2025-06-27 15:01:42,356 - INFO - Batch: 9 +2025-06-27 15:01:42,356 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,356 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,356 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,357 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.6813, 0.9128, 2.3372, 2.3220, 2.5514, 2.5759, 2.3228, 2.6669, + -0.1183, 0.7985]) +2025-06-27 15:01:42,357 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.8912, 0.3833, 2.2885, -0.3175, 3.1891, -0.0260, -0.5542, 1.2801, + 3.1246, 2.3803]) +2025-06-27 
15:01:42,357 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,357 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,357 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,358 - INFO - The first 10 points pressure for the sample_0: tensor([ -74.5930, -468.5290, -17.8253, -111.4660, -279.8660, -89.8917, + -81.7025, -230.2680, -153.5960, 18.8863]) +2025-06-27 15:01:42,358 - INFO - The first 10 points pressure for the sample_1: tensor([ -62.1802, 95.3592, -73.3089, -172.8170, -41.2741, -161.2840, + -57.3265, -262.5780, -101.2380, -39.0603]) +2025-06-27 15:01:42,365 - INFO - Batch: 10 +2025-06-27 15:01:42,365 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,365 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,365 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,366 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.0061, 0.3184, 1.2802, 1.1238, 2.8385, -0.5982, 2.9736, 3.1936, + 2.8499, -0.7896]) +2025-06-27 15:01:42,366 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.7120, -0.3803, 2.9076, 2.5315, 1.9188, -0.3108, 3.8506, 0.5791, + 2.8501, 0.2604]) +2025-06-27 15:01:42,366 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,366 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,366 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,367 - INFO - The first 10 points pressure for the sample_0: tensor([-119.4090, -7.9263, -97.0791, -215.6410, -71.9722, -38.5220, + -76.4743, 1.0474, -67.6804, 296.0490]) +2025-06-27 15:01:42,367 - INFO - The first 10 points pressure for the sample_1: tensor([-112.9390, -178.6530, -134.2990, -31.5704, -71.9506, -192.3010, + -16.1056, 163.5570, -113.4070, -174.5600]) +2025-06-27 15:01:42,395 - INFO - Batch: 11 +2025-06-27 15:01:42,395 - INFO - Batch.points.shape: 
torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,396 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,396 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,396 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.1075, 3.8310, 2.6669, 2.5005, 0.5240, -0.3776, 0.1010, -0.0613, + 0.8792, 2.9784]) +2025-06-27 15:01:42,397 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.8087, 0.8565, 2.6422, 2.0250, -0.7496, -0.1292, 0.1458, 0.0684, + 1.5663, -0.0601]) +2025-06-27 15:01:42,397 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,397 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,397 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,397 - INFO - The first 10 points pressure for the sample_0: tensor([-159.9450, -22.9807, -89.1701, -74.5420, -94.6650, -170.8510, + -145.5740, -175.5390, -95.2538, -97.0740]) +2025-06-27 15:01:42,397 - INFO - The first 10 points pressure for the sample_1: tensor([ -69.0255, -82.4216, -41.9460, -69.3559, 318.6280, -154.0390, + 65.4091, -241.1140, -95.6229, -201.5230]) +2025-06-27 15:01:42,403 - INFO - Batch: 12 +2025-06-27 15:01:42,404 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,404 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,404 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,404 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.4883, 3.8665, 3.0949, 1.4288, 0.8677, 1.4062, -0.1070, -0.1792, + -0.7087, 1.6583]) +2025-06-27 15:01:42,404 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 1.4521, -0.3804, 2.6021, 2.8123, 0.8792, -0.3793, 0.6042, -0.6245, + 0.3771, 0.4348]) +2025-06-27 15:01:42,405 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,405 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 
15:01:42,405 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,405 - INFO - The first 10 points pressure for the sample_0: tensor([ -86.7106, -19.0995, -112.9440, -238.0560, -190.9050, -137.6680, + -31.7392, -184.7600, -52.8803, -75.6506]) +2025-06-27 15:01:42,405 - INFO - The first 10 points pressure for the sample_1: tensor([ -74.8531, -72.0747, -79.8344, -177.5810, -144.1570, -77.6853, + -70.6408, -160.1670, -128.4620, 102.3960]) +2025-06-27 15:01:42,413 - INFO - Batch: 13 +2025-06-27 15:01:42,413 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,413 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,414 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,414 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.1904, 1.5466, 0.2231, -0.7438, -0.6833, 2.7010, 3.1388, -0.5140, + 0.0188, 0.3035]) +2025-06-27 15:01:42,415 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.9523, 3.2254, -0.8287, 2.6178, 0.3874, -0.4131, 1.7728, 2.4983, + -0.7710, 2.7925]) +2025-06-27 15:01:42,415 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,415 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,415 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,415 - INFO - The first 10 points pressure for the sample_0: tensor([ -11.0432, -181.9940, -209.9080, 334.9540, -21.7079, -117.5570, + -177.2140, -252.7250, -114.8210, -136.9090]) +2025-06-27 15:01:42,416 - INFO - The first 10 points pressure for the sample_1: tensor([ 376.2220, -66.3437, -79.2989, -83.8929, 84.3352, -319.6010, + -209.7620, -110.9740, -149.2000, -128.4940]) +2025-06-27 15:01:42,441 - INFO - Batch: 14 +2025-06-27 15:01:42,442 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,442 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,442 - INFO - 
points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,443 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.9188, 1.9219, -0.2439, -0.2003, 3.6165, 3.7250, -0.6370, -0.1329, + 1.4949, 1.0856]) +2025-06-27 15:01:42,443 - INFO - The first 10 points in x_coor for the sample_1: tensor([0.9479, 0.2294, 0.2146, 1.4291, 2.4146, 0.3630, 3.1962, 3.5604, 2.1624, + 2.9073]) +2025-06-27 15:01:42,443 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,443 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,443 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,444 - INFO - The first 10 points pressure for the sample_0: tensor([ -63.7926, -202.4660, -114.2120, -170.5030, -25.9001, -14.2671, + 202.1390, -156.5060, -74.7689, -302.7870]) +2025-06-27 15:01:42,444 - INFO - The first 10 points pressure for the sample_1: tensor([-101.0460, -168.4900, -162.7870, -163.8160, -32.0137, -122.1340, + -70.1305, 61.3782, -206.4840, -77.4960]) +2025-06-27 15:01:42,451 - INFO - Batch: 15 +2025-06-27 15:01:42,451 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,451 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,451 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,451 - INFO - The first 10 points in x_coor for the sample_0: tensor([1.6354, 2.7358, 3.0108, 2.9425, 0.5582, 3.3426, 1.6927, 2.6554, 1.1885, + 2.9646]) +2025-06-27 15:01:42,452 - INFO - The first 10 points in x_coor for the sample_1: tensor([0.7073, 2.6755, 0.4456, 0.2009, 3.9422, 2.2771, 2.5980, 2.0823, 1.0969, + 0.8215]) +2025-06-27 15:01:42,452 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,452 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,452 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,452 - INFO - The first 10 points pressure for the sample_0: 
tensor([ -53.3478, -249.1540, -217.1740, -38.3287, -57.3577, -39.5899, + -53.4930, -198.4300, -62.9154, -72.7477]) +2025-06-27 15:01:42,452 - INFO - The first 10 points pressure for the sample_1: tensor([ -69.7244, -171.5290, 77.3724, -124.6980, -16.5869, -37.1627, + -110.4390, -50.8352, -93.0625, 35.6096]) +2025-06-27 15:01:42,458 - INFO - Batch: 16 +2025-06-27 15:01:42,459 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,459 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,459 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,459 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.0866, 3.0906, 2.8814, 3.3540, 0.1802, 3.5031, -0.4595, 2.9398, + -0.6862, 2.8402]) +2025-06-27 15:01:42,460 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 1.3375, 2.6600, 0.9213, -0.2852, 3.8668, 3.6532, 0.0999, -0.0257, + -0.4694, 3.0822]) +2025-06-27 15:01:42,460 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,460 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,460 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,460 - INFO - The first 10 points pressure for the sample_0: tensor([-139.3840, -76.1815, -77.5190, -45.6850, -198.1980, 18.8768, + -231.3870, -84.9075, 368.2710, -40.0785]) +2025-06-27 15:01:42,460 - INFO - The first 10 points pressure for the sample_1: tensor([ -64.3042, -57.6938, -522.9560, 408.5530, -19.7023, -25.3357, + -126.2210, -57.5550, -155.8810, 95.9221]) +2025-06-27 15:01:42,470 - INFO - Batch: 17 +2025-06-27 15:01:42,470 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,471 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,471 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,472 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.1838, 1.5781, -0.1521, 0.9625, 
1.6583, 0.0083, -0.0349, 3.1378, + 2.3802, 0.3751]) +2025-06-27 15:01:42,472 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 1.6692, 3.0700, 3.6759, 3.8323, -0.6323, 0.4708, -0.5861, 2.3115, + -0.5990, -0.3932]) +2025-06-27 15:01:42,472 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,472 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,472 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,472 - INFO - The first 10 points pressure for the sample_0: tensor([-211.8480, -64.9082, -183.0390, -291.3150, -68.5615, -128.2960, + -151.9780, -87.9528, -53.8223, 52.7714]) +2025-06-27 15:01:42,473 - INFO - The first 10 points pressure for the sample_1: tensor([-188.8770, -90.7600, -102.9600, -74.7353, -238.1230, 93.8826, + -216.2530, -112.1910, 4.2031, -268.9180]) +2025-06-27 15:01:42,478 - INFO - Batch: 18 +2025-06-27 15:01:42,478 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,479 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,479 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,479 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.4499, 3.2238, 0.0198, -0.8947, 0.1846, 0.3769, 2.5953, 0.1344, + 2.6005, 2.5104]) +2025-06-27 15:01:42,479 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.6021, -0.4711, 0.2979, -0.4244, 0.1350, 3.0414, -0.7052, 2.4948, + 0.1110, 0.1914]) +2025-06-27 15:01:42,479 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,479 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,480 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,480 - INFO - The first 10 points pressure for the sample_0: tensor([ -76.2705, -109.5390, -188.3260, 252.8330, -176.8290, -152.6550, + -65.6192, -174.7490, -241.6180, -97.7168]) +2025-06-27 15:01:42,480 - INFO - The first 10 
points pressure for the sample_1: tensor([ -18.4898, -178.5000, -314.8540, -146.0690, -120.9740, -48.0461, + 289.0650, -100.6570, -146.2480, -62.3305]) +2025-06-27 15:01:42,486 - INFO - Batch: 19 +2025-06-27 15:01:42,486 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,487 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,487 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,487 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.6556, 0.7761, 2.6521, -0.1636, 3.1249, -0.1745, 2.7500, 3.6731, + 3.3997, 2.8615]) +2025-06-27 15:01:42,487 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.1495, -0.2890, -0.9955, 0.1877, 2.8025, 2.8040, -0.9255, 3.0113, + 2.6665, 0.9294]) +2025-06-27 15:01:42,488 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,488 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,488 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,488 - INFO - The first 10 points pressure for the sample_0: tensor([ -90.2428, -87.7525, -174.1110, -156.8580, -88.2660, -195.9300, + -256.0310, -8.6092, -122.2230, -52.9257]) +2025-06-27 15:01:42,488 - INFO - The first 10 points pressure for the sample_1: tensor([ -55.0505, -122.1260, 396.7610, -90.2323, -152.7490, -82.5651, + 328.6670, -427.7260, -100.4770, -27.4212]) +2025-06-27 15:01:42,495 - INFO - Batch: 20 +2025-06-27 15:01:42,495 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,495 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,495 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,496 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.7703, 3.6288, -0.0146, 0.1477, 2.8393, 2.8385, 1.3833, 3.0913, + 2.8498, -0.1425]) +2025-06-27 15:01:42,496 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.3972, 
3.9183, 3.2692, 3.7785, -0.1782, 2.5976, -0.3481, 3.4233, + 3.6062, 0.2469]) +2025-06-27 15:01:42,497 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,497 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,497 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,497 - INFO - The first 10 points pressure for the sample_0: tensor([ -83.3786, -22.5281, -175.6870, -139.1320, -77.1722, -71.0416, + -81.8121, -42.6166, -128.7640, -126.2550]) +2025-06-27 15:01:42,497 - INFO - The first 10 points pressure for the sample_1: tensor([-201.5640, -16.7136, -106.6490, -42.4794, -175.8160, -8.5266, + -134.5120, -136.8020, -58.7743, -206.3970]) +2025-06-27 15:01:42,503 - INFO - Batch: 21 +2025-06-27 15:01:42,503 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,503 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,503 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,504 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.0842, 2.7824, -0.2797, 2.4688, 0.8248, 1.1311, -0.4703, 0.1026, + 2.5978, 1.4177]) +2025-06-27 15:01:42,504 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.4264, 2.8696, 1.6240, 2.9751, 2.6334, 0.5618, 0.1458, 0.3647, + -0.0344, 0.9135]) +2025-06-27 15:01:42,504 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,504 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,504 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,505 - INFO - The first 10 points pressure for the sample_0: tensor([-185.7600, -93.9326, -159.4260, -93.3238, 33.1694, -555.6100, + -83.0041, 6.0518, -82.7327, -174.1910]) +2025-06-27 15:01:42,505 - INFO - The first 10 points pressure for the sample_1: tensor([ -98.1512, -98.8278, -70.9374, -85.9046, -191.0780, 112.1150, + -187.3720, -214.2010, -247.4480, -105.1760]) 
+2025-06-27 15:01:42,513 - INFO - Batch: 22 +2025-06-27 15:01:42,513 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,513 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,513 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,514 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.2349, 0.9135, -0.1889, 0.2861, 1.0193, 1.2458, -0.4257, 0.1707, + 2.7322, 2.7668]) +2025-06-27 15:01:42,514 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.9975, -0.4127, 3.8528, 2.7812, 0.0457, 1.7387, -0.1865, 2.7813, + 0.2707, -0.6109]) +2025-06-27 15:01:42,514 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,514 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,514 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,515 - INFO - The first 10 points pressure for the sample_0: tensor([ 132.8470, -74.3528, -155.9420, -174.4330, -338.2300, -73.8055, + -185.3560, -112.9740, -96.7461, -106.6330]) +2025-06-27 15:01:42,515 - INFO - The first 10 points pressure for the sample_1: tensor([-373.3540, -141.6460, 25.1866, -86.2460, -147.9350, -224.0500, + -198.4290, -63.9653, 87.9137, -98.6002]) +2025-06-27 15:01:42,523 - INFO - Batch: 23 +2025-06-27 15:01:42,523 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,523 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,523 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,524 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.4362, 2.1394, -0.8130, -0.2289, -0.2236, 3.2831, -0.0146, -0.4587, + 1.9986, 2.7975]) +2025-06-27 15:01:42,524 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.2382, 2.4836, 0.0085, 1.8302, 2.6476, 3.4547, 2.7562, -0.8593, + 3.8241, 0.0800]) +2025-06-27 15:01:42,527 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 
10000]) +2025-06-27 15:01:42,527 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,527 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,528 - INFO - The first 10 points pressure for the sample_0: tensor([-285.9120, -143.0830, 447.8620, -818.7190, -206.1260, -203.6470, + -181.9760, -92.8544, -55.5424, -15.3260]) +2025-06-27 15:01:42,528 - INFO - The first 10 points pressure for the sample_1: tensor([ -97.2788, -92.2684, -135.6480, -96.3057, -67.3315, -145.7940, + -66.4935, 439.0400, -4.7178, -107.6100]) +2025-06-27 15:01:42,533 - INFO - Batch: 24 +2025-06-27 15:01:42,533 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,533 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,534 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,534 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.6553, -0.2362, 3.2621, 3.5150, 3.2970, -0.3015, 1.3146, 3.5571, + 2.3335, 0.9049]) +2025-06-27 15:01:42,535 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.6814, 2.5220, 2.0823, 3.5908, 0.2836, 0.8439, 3.6716, -0.1743, + 2.4019, 2.6665]) +2025-06-27 15:01:42,535 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,535 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,535 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,535 - INFO - The first 10 points pressure for the sample_0: tensor([ -74.1879, -109.2970, -83.0217, -42.2152, -95.3424, -155.8450, + -83.9559, -30.1028, -67.7424, -12.2910]) +2025-06-27 15:01:42,535 - INFO - The first 10 points pressure for the sample_1: tensor([ -7.6114, -59.0061, -47.3247, -30.2943, -39.4249, -351.7790, + -19.8623, -195.5630, -50.2555, -120.2700]) +2025-06-27 15:01:42,544 - INFO - Batch: 25 +2025-06-27 15:01:42,544 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,544 - INFO - 
points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,544 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,545 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.0024, 2.6824, -0.5860, 3.1021, 0.9962, -0.3762, 0.0654, -0.5734, + 0.3521, -0.9314]) +2025-06-27 15:01:42,545 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 1.0195, 3.0786, -0.9305, 3.6379, -0.0828, 1.6584, 0.9161, 0.9135, + -0.0261, 0.5813]) +2025-06-27 15:01:42,545 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,545 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,545 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,546 - INFO - The first 10 points pressure for the sample_0: tensor([-8.8256e+01, -8.6504e+01, -3.2214e+02, -3.3256e+02, -5.7983e+02, + -8.2140e+01, -1.0871e+02, 4.1819e-02, -1.4809e+02, 2.5899e+02]) +2025-06-27 15:01:42,546 - INFO - The first 10 points pressure for the sample_1: tensor([ -91.5531, -88.4385, 420.0560, -11.4785, -138.2240, -229.8700, + -425.4430, -91.2196, -150.3880, -118.3980]) +2025-06-27 15:01:42,553 - INFO - Batch: 26 +2025-06-27 15:01:42,553 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,553 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,553 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,554 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 3.2496, 0.8104, -0.5261, -0.2329, 0.4323, 3.6518, 3.7327, 1.9652, + -0.4947, -0.5774]) +2025-06-27 15:01:42,554 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.8151, 0.9867, 0.0400, 0.2850, 0.4326, 3.5833, -0.0836, -0.7234, + -0.7741, 1.5676]) +2025-06-27 15:01:42,554 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,554 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,555 - INFO - 
pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,555 - INFO - The first 10 points pressure for the sample_0: tensor([ 32.1262, -80.0883, -123.9470, -195.3070, -100.2010, 29.2670, + -16.8960, -86.8586, 3.6613, 54.4899]) +2025-06-27 15:01:42,555 - INFO - The first 10 points pressure for the sample_1: tensor([ -59.2862, -21.4684, -132.8340, -111.0530, -52.8657, 50.1366, + -194.1660, 113.6450, 358.9430, -226.3790]) +2025-06-27 15:01:42,563 - INFO - Batch: 27 +2025-06-27 15:01:42,563 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,563 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,563 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,564 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.3280, 0.9479, 0.6844, 0.5470, -0.5607, 0.8677, 2.4142, 1.2802, + 0.9326, 2.3344]) +2025-06-27 15:01:42,565 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.4468, 2.4833, 2.7102, 3.8827, 3.1250, 1.1427, 1.0396, -0.1865, + 2.7696, 0.5240]) +2025-06-27 15:01:42,565 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,565 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,565 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,565 - INFO - The first 10 points pressure for the sample_0: tensor([-172.0560, -57.6567, -102.8640, -106.8040, -207.4080, -38.4566, + -56.6369, -76.8144, -486.2720, -36.1637]) +2025-06-27 15:01:42,565 - INFO - The first 10 points pressure for the sample_1: tensor([ 16.2851, -143.7690, -95.7955, -35.1342, -63.5244, -93.4655, + -68.2843, -181.9770, -75.4459, -66.5618]) +2025-06-27 15:01:42,574 - INFO - Batch: 28 +2025-06-27 15:01:42,574 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,574 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,574 - INFO - points_sample_0.shape: torch.Size([3, 10000]) 
+2025-06-27 15:01:42,574 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.1690, 3.0182, -0.3117, 1.0167, 0.8448, 0.3865, -0.6800, 0.1000, + -0.8942, 2.8359]) +2025-06-27 15:01:42,575 - INFO - The first 10 points in x_coor for the sample_1: tensor([-3.1358e-03, 3.7210e+00, 3.7106e+00, 1.5323e+00, -1.1745e-01, + 1.4169e+00, 2.9957e+00, -2.5156e-02, -2.6220e-02, 1.2272e-01]) +2025-06-27 15:01:42,575 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,575 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,575 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,575 - INFO - The first 10 points pressure for the sample_0: tensor([ -71.8076, -28.2225, -180.3700, -69.3480, -93.5420, -103.9380, + -31.1451, -282.2930, -208.7450, -186.7030]) +2025-06-27 15:01:42,576 - INFO - The first 10 points pressure for the sample_1: tensor([-149.8820, -47.8371, -30.8754, -61.2213, -179.6550, -269.8940, + -65.7033, -205.9630, -144.7190, -148.6480]) +2025-06-27 15:01:42,583 - INFO - Batch: 29 +2025-06-27 15:01:42,583 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,583 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,583 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,584 - INFO - The first 10 points in x_coor for the sample_0: tensor([0.1459, 1.8531, 0.2792, 1.1312, 0.3291, 1.7615, 0.3698, 0.2260, 2.2655, + 0.0992]) +2025-06-27 15:01:42,585 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.2671, 0.0199, 0.9527, 1.0279, 3.4555, 1.6812, 2.3703, -0.1861, + 3.2626, 0.0304]) +2025-06-27 15:01:42,585 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,585 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,585 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,585 - INFO - The first 10 points pressure for the sample_0: 
tensor([-176.4800, -56.3783, -66.4860, -107.8410, -189.6050, -66.6120, + -43.6896, -199.4750, -179.7610, -173.2540]) +2025-06-27 15:01:42,585 - INFO - The first 10 points pressure for the sample_1: tensor([-137.7900, -144.6510, -590.5480, -509.6860, -54.0269, -106.7610, + -76.1703, -273.7640, -135.9330, -154.3830]) +2025-06-27 15:01:42,592 - INFO - Batch: 30 +2025-06-27 15:01:42,592 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,592 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,592 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,593 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.9479, 0.3987, -0.1521, 1.9906, -0.0118, -0.5948, 2.6331, 0.5215, + 2.7021, 0.5607]) +2025-06-27 15:01:42,593 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.5999, 1.7842, 3.0147, 2.8585, 2.1510, 1.4178, -0.8385, 0.4896, + 3.3658, 2.4531]) +2025-06-27 15:01:42,593 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,593 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,593 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,594 - INFO - The first 10 points pressure for the sample_0: tensor([ -50.8184, 107.2220, -172.7260, -58.7541, -100.8550, -93.4919, + -255.4690, -141.1370, -97.2591, 154.2220]) +2025-06-27 15:01:42,594 - INFO - The first 10 points pressure for the sample_1: tensor([ -92.1932, -75.5878, -113.2290, -145.7610, -83.5426, -248.8180, + 267.4870, -147.7300, -47.9468, -80.9790]) +2025-06-27 15:01:42,600 - INFO - Batch: 31 +2025-06-27 15:01:42,601 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,601 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,601 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,601 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.4635, -0.1174, 
0.6167, -0.9828, 1.6812, 1.0847, 1.7729, -0.1520, + 2.5627, 3.0302]) +2025-06-27 15:01:42,601 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.3752, 0.2724, -0.4835, 0.9250, 3.6627, 2.8386, 0.8557, 3.9218, + 3.1210, -0.3901]) +2025-06-27 15:01:42,602 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,602 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,602 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,602 - INFO - The first 10 points pressure for the sample_0: tensor([ -95.1635, -107.9970, 137.8990, -377.8830, -94.7552, -76.7119, + -94.5342, -96.3450, -94.3630, -47.7246]) +2025-06-27 15:01:42,602 - INFO - The first 10 points pressure for the sample_1: tensor([ 26.8221, -139.6480, -68.4088, -78.4813, -92.9359, -102.2890, + -458.8600, -51.0210, -36.5807, -119.5220]) +2025-06-27 15:01:42,609 - INFO - Batch: 32 +2025-06-27 15:01:42,609 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,609 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,609 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,610 - INFO - The first 10 points in x_coor for the sample_0: tensor([2.6552, 2.8621, 3.0570, 2.2725, 2.7130, 3.0448, 0.1232, 2.8604, 3.6765, + 0.7417]) +2025-06-27 15:01:42,610 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.7915, 0.4004, 0.1917, 3.7563, -0.1262, 2.2084, 3.2492, 1.6240, + -0.9586, 0.3788]) +2025-06-27 15:01:42,610 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,610 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,610 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,611 - INFO - The first 10 points pressure for the sample_0: tensor([-142.5580, -140.2040, -94.3418, -85.5900, -165.3290, -8.9998, + -434.2130, 24.8030, -15.0267, -87.2577]) +2025-06-27 15:01:42,611 - INFO - The first 
10 points pressure for the sample_1: tensor([ -33.3455, 82.1770, -196.9200, -72.2047, -49.2621, -23.6994, + -41.9599, -72.0221, 442.1610, -72.5108]) +2025-06-27 15:01:42,763 - INFO - Batch: 33 +2025-06-27 15:01:42,764 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,764 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,764 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,765 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.3813, -0.6180, 2.7117, -0.0955, 2.3878, 1.8990, -0.2204, -0.1854, + 2.4366, -0.0807]) +2025-06-27 15:01:42,765 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 1.4519, 0.0771, 0.2691, -0.4843, 2.0249, 3.3312, 3.5800, 2.7106, + 2.6875, -0.5732]) +2025-06-27 15:01:42,765 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,765 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,765 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,766 - INFO - The first 10 points pressure for the sample_0: tensor([-334.7100, -208.6040, -78.3032, -196.7590, -80.4406, -74.0066, + -64.5444, -148.8190, -204.1940, -21.9310]) +2025-06-27 15:01:42,766 - INFO - The first 10 points pressure for the sample_1: tensor([ -52.1155, -159.1580, -123.9140, -49.3530, -202.4540, 45.0232, + -26.4776, -63.7452, -51.6638, -429.7930]) +2025-06-27 15:01:42,773 - INFO - Batch: 34 +2025-06-27 15:01:42,773 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,773 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,773 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,774 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.6435, 3.1134, 1.0625, 1.1242, 0.7875, 0.9040, 2.9080, -0.8353, + -0.2323, 2.7696]) +2025-06-27 15:01:42,774 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.1132, 
1.8528, 3.9148, -0.2349, 3.9169, 1.4979, 1.2687, 0.0887, + 1.0622, 3.4802]) +2025-06-27 15:01:42,774 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,774 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,774 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,775 - INFO - The first 10 points pressure for the sample_0: tensor([-122.1790, -83.9981, -116.2120, -243.7820, -167.9570, -46.7163, + -56.8178, 326.6480, -345.4950, -143.3180]) +2025-06-27 15:01:42,775 - INFO - The first 10 points pressure for the sample_1: tensor([ -90.0224, -93.3419, -30.0798, -173.1290, -79.8611, -52.6971, + -87.8026, -31.6445, -235.4820, 70.6987]) +2025-06-27 15:01:42,782 - INFO - Batch: 35 +2025-06-27 15:01:42,782 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,782 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,782 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,782 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.8641, 0.8677, 1.8042, 0.1116, -0.8959, 3.5810, 0.5146, 3.5696, + 1.7614, 2.7114]) +2025-06-27 15:01:42,783 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.0964, 2.4975, 2.4031, 1.5668, -0.8922, 3.7699, 1.2573, 0.2626, + 0.5347, -0.2470]) +2025-06-27 15:01:42,783 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,783 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,783 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,783 - INFO - The first 10 points pressure for the sample_0: tensor([-111.9880, -129.0600, -73.4888, -137.5430, 341.9980, -36.2054, + 107.2540, -18.7434, -79.0948, -57.4639]) +2025-06-27 15:01:42,783 - INFO - The first 10 points pressure for the sample_1: tensor([-118.6720, -88.8445, -147.5880, -248.5680, 302.8140, 0.5286, + -86.7859, -110.4070, 12.7982, -160.6780]) +2025-06-27 
15:01:42,790 - INFO - Batch: 36 +2025-06-27 15:01:42,790 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,790 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,790 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,790 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.1916, -0.3633, 0.0083, 0.6492, 1.6015, 3.7575, 2.5296, 2.8156, + 2.3077, 2.9862]) +2025-06-27 15:01:42,791 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.5868, 0.0720, 3.6466, 2.0823, -0.1516, 3.5156, 1.3031, 1.5552, + 2.6781, 2.8837]) +2025-06-27 15:01:42,791 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,791 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,791 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,791 - INFO - The first 10 points pressure for the sample_0: tensor([ -13.6639, -159.2940, -155.5510, 184.0170, -97.3904, -22.4263, + -407.6130, -40.0012, -152.1620, -24.3921]) +2025-06-27 15:01:42,791 - INFO - The first 10 points pressure for the sample_1: tensor([ -96.5537, -158.0490, -23.8407, -73.7478, -86.8836, 4.4890, + -81.7787, -66.9959, -205.5900, -51.5440]) +2025-06-27 15:01:42,797 - INFO - Batch: 37 +2025-06-27 15:01:42,797 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,797 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,797 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,798 - INFO - The first 10 points in x_coor for the sample_0: tensor([2.6894, 0.8658, 1.3604, 1.4868, 3.7887, 0.5710, 0.5009, 2.9438, 2.0263, + 3.3437]) +2025-06-27 15:01:42,798 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 3.0557, 0.5470, -0.3219, -0.7490, -0.5760, 2.0250, 1.9906, -0.1979, + 0.0991, -0.6017]) +2025-06-27 15:01:42,798 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 
15:01:42,798 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,798 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,799 - INFO - The first 10 points pressure for the sample_0: tensor([-103.9700, 370.8800, -77.0428, -54.8544, -41.9449, 105.4260, + -92.5658, -44.6539, -78.3615, -40.8942]) +2025-06-27 15:01:42,799 - INFO - The first 10 points pressure for the sample_1: tensor([ -53.7789, -136.5090, -105.3350, 409.5120, -203.6270, -100.4900, + -98.7160, -204.2220, -43.8484, 83.2925]) +2025-06-27 15:01:42,805 - INFO - Batch: 38 +2025-06-27 15:01:42,805 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,805 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,805 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,806 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.3117, 0.4094, 0.0427, 3.1248, 2.7575, 2.6631, 0.5927, 2.8988, + 1.4521, 1.1771]) +2025-06-27 15:01:42,806 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.3688, -0.0817, 1.5323, 1.9906, 3.0661, 0.7188, 2.2313, 2.3115, + 3.2123, 0.4782]) +2025-06-27 15:01:42,806 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,806 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,806 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,807 - INFO - The first 10 points pressure for the sample_0: tensor([-268.5310, -283.7720, -251.0730, -140.5760, -87.6902, -242.1250, + -80.3776, -84.8839, -90.4834, -90.0195]) +2025-06-27 15:01:42,807 - INFO - The first 10 points pressure for the sample_1: tensor([-126.9870, -168.7350, -169.6220, -70.4165, -45.4687, -118.2910, + -72.3663, -65.5506, -84.8359, -71.2502]) +2025-06-27 15:01:42,813 - INFO - Batch: 39 +2025-06-27 15:01:42,813 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,813 - INFO - points_sample_0.shape: 
torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,813 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,814 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.6710, 1.9803, 1.3950, 2.8383, 0.8219, 1.8760, 0.0427, 0.6495, + 0.3333, 2.8042]) +2025-06-27 15:01:42,814 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.3143, 2.9865, 2.9771, 2.9417, 1.0311, 3.5053, 1.4050, 0.1797, + 2.7006, -0.0968]) +2025-06-27 15:01:42,814 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,814 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,814 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,815 - INFO - The first 10 points pressure for the sample_0: tensor([ -95.5753, -94.2381, -310.8570, -109.1800, -81.4626, -70.8335, + -153.2640, -75.0069, 33.6827, -70.6522]) +2025-06-27 15:01:42,815 - INFO - The first 10 points pressure for the sample_1: tensor([-175.4440, -149.5730, -123.9650, -90.3130, -144.0810, -50.9281, + -253.8000, -57.1164, -173.6610, -177.1350]) +2025-06-27 15:01:42,821 - INFO - Batch: 40 +2025-06-27 15:01:42,821 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,821 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,821 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,821 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.7469, 2.7587, 0.7073, 2.7011, 3.4110, 2.1052, 2.5066, 3.0369, + -0.5161, 2.3114]) +2025-06-27 15:01:42,821 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.4209, 0.4208, 1.7844, -0.3690, 0.4459, 1.6354, 3.5461, 2.6436, + 1.8417, 1.7042]) +2025-06-27 15:01:42,822 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,822 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,822 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,822 
- INFO - The first 10 points pressure for the sample_0: tensor([ -71.5105, -83.9507, -115.8100, -88.5756, -51.6981, -55.6062, + -33.3012, -96.3630, -220.3480, -136.4290]) +2025-06-27 15:01:42,822 - INFO - The first 10 points pressure for the sample_1: tensor([ -49.1153, -110.7010, -76.5956, -86.4747, 126.2600, -100.9660, + -14.0673, -63.5962, -83.5589, -76.9001]) +2025-06-27 15:01:42,828 - INFO - Batch: 41 +2025-06-27 15:01:42,828 - INFO - Batch.points.shape: torch.Size([2, 1, 3, 10000]) +2025-06-27 15:01:42,828 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:01:42,828 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:01:42,829 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 3.1197, -0.7715, 4.0171, 3.3372, 0.6860, 1.1312, 0.2249, -0.3013, + 4.0153, 3.0571]) +2025-06-27 15:01:42,829 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.2488, -0.1749, -0.5541, 3.5946, 3.1490, -0.4508, 2.4746, 1.0854, + 1.8416, 1.2000]) +2025-06-27 15:01:42,829 - INFO - Batch.Pressure.shape: torch.Size([2, 1, 10000]) +2025-06-27 15:01:42,829 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:01:42,829 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:01:42,829 - INFO - The first 10 points pressure for the sample_0: tensor([-139.8260, 26.5946, -19.0641, -114.9630, 187.9530, -74.7365, + 26.5352, -145.7950, -19.5303, -161.1850]) +2025-06-27 15:01:42,830 - INFO - The first 10 points pressure for the sample_1: tensor([ 43.5840, -199.3970, -144.1050, 10.0279, -68.8296, -107.1940, + -66.7033, -66.0018, -68.5476, -84.6221]) +2025-06-27 15:01:43,477 - INFO - Type of train_subset: +2025-06-27 15:01:43,477 - INFO - Number of samples of train_subset : 85 +2025-06-27 15:01:43,477 - INFO - Subset indices: [0, 1, 2, 4, 6] +2025-06-27 15:01:43,477 - INFO - List the train_subset vtk files: +2025-06-27 15:01:43,478 - INFO - 0: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_073.vtk +2025-06-27 15:01:43,478 - INFO - 1: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_051.vtk +2025-06-27 15:01:43,478 - INFO - 2: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_099.vtk +2025-06-27 15:01:43,478 - INFO - 3: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_164.vtk +2025-06-27 15:01:43,478 - INFO - 4: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_030.vtk +2025-06-27 15:01:43,478 - INFO - 5: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_132.vtk +2025-06-27 15:01:43,478 - INFO - 6: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_107.vtk +2025-06-27 15:01:43,478 - INFO - 7: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_186.vtk +2025-06-27 15:01:43,478 - INFO - 8: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_079.vtk +2025-06-27 15:01:43,478 - INFO - 9: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_018.vtk +2025-06-27 15:01:43,478 - INFO - 10: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_015.vtk +2025-06-27 15:01:43,478 - INFO - 11: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_191.vtk +2025-06-27 15:01:43,478 - INFO - 12: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_084.vtk +2025-06-27 15:01:43,478 - INFO - 13: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_129.vtk +2025-06-27 15:01:43,478 - INFO - 14: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_080.vtk +2025-06-27 15:01:43,478 - INFO - 15: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_010.vtk +2025-06-27 15:01:43,478 - INFO - 16: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_094.vtk +2025-06-27 15:01:43,478 - INFO - 17: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_035.vtk +2025-06-27 15:01:43,478 - INFO - 18: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk +2025-06-27 15:01:43,478 - INFO - 19: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_142.vtk +2025-06-27 15:01:43,478 - INFO - 20: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk +2025-06-27 15:01:43,478 - INFO - 21: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_024.vtk +2025-06-27 15:01:43,478 - INFO - 22: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_106.vtk +2025-06-27 15:01:43,478 - INFO - 23: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_161.vtk +2025-06-27 15:01:43,478 - INFO - 24: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_171.vtk +2025-06-27 15:01:43,478 - INFO - 25: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_156.vtk +2025-06-27 15:01:43,478 - INFO - 26: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_053.vtk +2025-06-27 15:01:43,478 - INFO - 27: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_059.vtk +2025-06-27 15:01:43,479 - INFO - 28: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_082.vtk +2025-06-27 15:01:43,479 - INFO - 29: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_109.vtk +2025-06-27 15:01:43,479 - INFO - 30: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_139.vtk +2025-06-27 15:01:43,479 - INFO - 31: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_055.vtk +2025-06-27 15:01:43,479 - INFO - 32: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_088.vtk +2025-06-27 15:01:43,479 - INFO - 33: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_174.vtk +2025-06-27 15:01:43,479 - INFO - 34: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_104.vtk +2025-06-27 15:01:43,479 - INFO - 35: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_014.vtk +2025-06-27 15:01:43,479 - INFO - 36: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_072.vtk +2025-06-27 15:01:43,479 - INFO - 37: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_146.vtk +2025-06-27 15:01:43,479 - INFO - 38: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_194.vtk +2025-06-27 15:01:43,479 - INFO - 39: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_066.vtk +2025-06-27 15:01:43,479 - INFO - 40: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_067.vtk +2025-06-27 15:01:43,479 - INFO - 41: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_130.vtk +2025-06-27 15:01:43,479 - INFO - 42: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_131.vtk +2025-06-27 15:01:43,479 - INFO - 43: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_182.vtk +2025-06-27 15:01:43,479 - INFO - 44: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_049.vtk +2025-06-27 15:01:43,479 - INFO - 45: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_022.vtk +2025-06-27 15:01:43,479 - INFO - 46: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_042.vtk +2025-06-27 15:01:43,479 - INFO - 47: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_036.vtk +2025-06-27 15:01:43,479 - INFO - 48: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_137.vtk +2025-06-27 15:01:43,479 - INFO - 49: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_187.vtk +2025-06-27 15:01:43,479 - INFO - 50: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_195.vtk +2025-06-27 15:01:43,479 - INFO - 51: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_180.vtk +2025-06-27 15:01:43,479 - INFO - 52: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_119.vtk +2025-06-27 15:01:43,479 - INFO - 53: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_169.vtk +2025-06-27 15:01:43,479 - INFO - 54: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_092.vtk +2025-06-27 15:01:43,480 - INFO - 55: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk +2025-06-27 15:01:43,480 - INFO - 56: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_021.vtk +2025-06-27 15:01:43,480 - INFO - 57: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_192.vtk +2025-06-27 15:01:43,480 - INFO - 58: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_100.vtk +2025-06-27 15:01:43,480 - INFO - 59: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_108.vtk +2025-06-27 15:01:43,480 - INFO - 60: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_093.vtk +2025-06-27 15:01:43,480 - INFO - 61: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_149.vtk +2025-06-27 15:01:43,480 - INFO - 62: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_060.vtk +2025-06-27 15:01:43,480 - INFO - 63: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_027.vtk +2025-06-27 15:01:43,480 - INFO - 64: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_061.vtk +2025-06-27 15:01:43,480 - INFO - 65: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_096.vtk +2025-06-27 15:01:43,480 - INFO - 66: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_039.vtk +2025-06-27 15:01:43,480 - INFO - 67: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_118.vtk +2025-06-27 15:01:43,480 - INFO - 68: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_122.vtk +2025-06-27 15:01:43,480 - INFO - 69: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_153.vtk +2025-06-27 15:01:43,480 - INFO - 70: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_144.vtk +2025-06-27 15:01:43,480 - INFO - 71: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_138.vtk +2025-06-27 15:01:43,480 - INFO - 72: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_068.vtk +2025-06-27 15:01:43,480 - INFO - 73: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_115.vtk +2025-06-27 15:01:43,480 - INFO - 74: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_028.vtk +2025-06-27 15:01:43,480 - INFO - 75: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_136.vtk +2025-06-27 15:01:43,480 - INFO - 76: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_085.vtk +2025-06-27 15:01:43,480 - INFO - 77: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_097.vtk +2025-06-27 15:01:43,480 - INFO - 78: 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_140.vtk +2025-06-27 15:01:43,480 - INFO - 79: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_190.vtk +2025-06-27 15:01:43,480 - INFO - 80: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_150.vtk +2025-06-27 15:01:43,480 - INFO - 81: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_170.vtk +2025-06-27 15:01:43,480 - INFO - 82: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_095.vtk +2025-06-27 15:01:43,480 - INFO - 83: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_071.vtk +2025-06-27 15:01:43,481 - INFO - 84: /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_155.vtk +2025-06-27 15:01:43,481 - INFO - Type of full_dataset: +2025-06-27 15:01:43,481 - INFO - Number of samples of full_dataset: 130 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_073.vtk: 0 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_051.vtk: 1 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_099.vtk: 2 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_009.vtk: 3 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_164.vtk: 4 +2025-06-27 15:01:43,481 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_166.vtk: 5 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_030.vtk: 6 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_132.vtk: 7 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_114.vtk: 8 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_107.vtk: 9 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_186.vtk: 10 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_175.vtk: 11 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_079.vtk: 12 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_018.vtk: 13 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_015.vtk: 14 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_191.vtk: 15 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_084.vtk: 16 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_032.vtk: 17 +2025-06-27 15:01:43,481 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_129.vtk: 18 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_080.vtk: 19 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_010.vtk: 20 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_094.vtk: 21 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_078.vtk: 22 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_035.vtk: 23 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_003.vtk: 24 +2025-06-27 15:01:43,481 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_020.vtk: 25 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_142.vtk: 26 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_005.vtk: 27 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_112.vtk: 28 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_125.vtk: 29 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_024.vtk: 30 +2025-06-27 15:01:43,482 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_106.vtk: 31 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_161.vtk: 32 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_171.vtk: 33 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_156.vtk: 34 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_053.vtk: 35 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_160.vtk: 36 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_059.vtk: 37 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_082.vtk: 38 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_109.vtk: 39 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_026.vtk: 40 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_139.vtk: 41 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_055.vtk: 42 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_088.vtk: 43 +2025-06-27 15:01:43,482 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_174.vtk: 44 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_104.vtk: 45 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_168.vtk: 46 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_014.vtk: 47 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_072.vtk: 48 +2025-06-27 15:01:43,482 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_147.vtk: 49 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_146.vtk: 50 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_194.vtk: 51 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_141.vtk: 52 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_002.vtk: 53 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_066.vtk: 54 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_050.vtk: 55 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_067.vtk: 56 +2025-06-27 15:01:43,483 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_130.vtk: 57 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_025.vtk: 58 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_019.vtk: 59 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_131.vtk: 60 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_182.vtk: 61 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_049.vtk: 62 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_022.vtk: 63 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_042.vtk: 64 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_177.vtk: 65 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_036.vtk: 66 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_137.vtk: 67 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_187.vtk: 68 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_195.vtk: 69 +2025-06-27 15:01:43,483 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_180.vtk: 70 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_178.vtk: 71 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_119.vtk: 72 +2025-06-27 15:01:43,483 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_169.vtk: 73 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_189.vtk: 74 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_092.vtk: 75 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_116.vtk: 76 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_001.vtk: 77 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_029.vtk: 78 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_021.vtk: 79 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_192.vtk: 80 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_100.vtk: 81 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_087.vtk: 82 +2025-06-27 15:01:43,484 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_108.vtk: 83 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_093.vtk: 84 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_034.vtk: 85 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_159.vtk: 86 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_089.vtk: 87 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_065.vtk: 88 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_149.vtk: 89 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_075.vtk: 90 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_060.vtk: 91 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_148.vtk: 92 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_027.vtk: 93 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_061.vtk: 94 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_096.vtk: 95 +2025-06-27 15:01:43,484 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_039.vtk: 96 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_058.vtk: 97 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_118.vtk: 98 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_091.vtk: 99 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_057.vtk: 100 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_122.vtk: 101 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_153.vtk: 102 +2025-06-27 15:01:43,484 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_144.vtk: 103 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_138.vtk: 104 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_068.vtk: 105 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_115.vtk: 106 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_135.vtk: 107 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_044.vtk: 108 +2025-06-27 15:01:43,485 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_028.vtk: 109 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_136.vtk: 110 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_085.vtk: 111 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_097.vtk: 112 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_121.vtk: 113 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_017.vtk: 114 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_140.vtk: 115 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_190.vtk: 116 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_023.vtk: 117 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_047.vtk: 118 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_150.vtk: 119 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_070.vtk: 120 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_170.vtk: 121 +2025-06-27 15:01:43,485 - INFO - 
/work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_110.vtk: 122 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_081.vtk: 123 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_172.vtk: 124 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_095.vtk: 125 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_071.vtk: 126 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_117.vtk: 127 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_155.vtk: 128 +2025-06-27 15:01:43,485 - INFO - /work/mae-zhangbj/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/Pressure_VTK/N_S_WWS_WM_043.vtk: 129 +2025-06-27 15:01:43,485 - INFO - Data loaded: 42 training batches, 10 validation batches, 12 test batches +2025-06-27 15:01:43,489 - INFO - Staring training for 10 epochs +2025-06-27 15:04:09,850 - INFO - args.exp_name : Train_Test +2025-06-27 15:04:09,851 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=10, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 15:04:09,851 - INFO - Starting training with 1 GPUs +2025-06-27 15:04:12,776 - INFO - Total trainable parameters: 1437705 +2025-06-27 15:04:12,841 - INFO - 
Type of train_dataloader: +2025-06-27 15:04:12,841 - INFO - Number of train_dataloader: 8 +2025-06-27 15:04:12,841 - INFO - We can access the internal conetnt by dataloader: +2025-06-27 15:04:16,989 - INFO - Batch: 0 +2025-06-27 15:04:16,989 - INFO - Batch.points.shape: torch.Size([10, 1, 3, 10000]) +2025-06-27 15:04:16,990 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:04:16,990 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:04:16,993 - INFO - The first 10 points in x_coor for the sample_0: tensor([-0.0485, -0.5382, 0.8928, -0.5284, 0.1444, 2.6208, 2.9762, 3.6200, + -0.3787, 1.3031]) +2025-06-27 15:04:16,994 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.2833, 2.6895, 2.9486, 1.6583, -0.7939, 2.0023, 3.1221, -0.4969, + 1.6010, 3.6084]) +2025-06-27 15:04:16,994 - INFO - Batch.Pressure.shape: torch.Size([10, 1, 10000]) +2025-06-27 15:04:16,994 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:04:16,994 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:04:16,994 - INFO - The first 10 points pressure for the sample_0: tensor([-338.8030, -198.2250, -328.4480, -501.5070, -109.0920, -77.5971, + -91.8080, -29.4908, -284.0090, -91.4154]) +2025-06-27 15:04:16,995 - INFO - The first 10 points pressure for the sample_1: tensor([-184.4790, -81.4952, -111.3050, -65.2745, 262.2230, -241.2780, + -14.4769, -133.4740, -90.2832, -18.6696]) +2025-06-27 15:04:17,032 - INFO - Batch: 1 +2025-06-27 15:04:17,032 - INFO - Batch.points.shape: torch.Size([10, 1, 3, 10000]) +2025-06-27 15:04:17,032 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:04:17,032 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:04:17,033 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.5521, 2.6666, 0.5497, 0.1573, 2.8155, 2.9984, -0.1173, 2.5646, + -0.9318, 0.5025]) +2025-06-27 15:04:17,033 - INFO - The first 10 points in x_coor for the 
sample_1: tensor([-0.3631, 3.3885, 0.4810, -0.2440, 1.7385, -0.4245, -0.0946, -0.6986, + 2.2198, 2.6981]) +2025-06-27 15:04:17,033 - INFO - Batch.Pressure.shape: torch.Size([10, 1, 10000]) +2025-06-27 15:04:17,033 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:04:17,033 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:04:17,034 - INFO - The first 10 points pressure for the sample_0: tensor([ -84.8091, -77.7356, 97.0457, -153.8820, -132.9120, -61.9000, + -132.3560, -76.8720, 436.0530, 81.3881]) +2025-06-27 15:04:17,037 - INFO - The first 10 points pressure for the sample_1: tensor([-102.3700, -15.2985, 80.3081, -129.9610, -95.3753, -90.2176, + -133.5760, 22.5415, -142.7650, -88.9344]) +2025-06-27 15:04:17,093 - INFO - Batch: 2 +2025-06-27 15:04:17,093 - INFO - Batch.points.shape: torch.Size([10, 1, 3, 10000]) +2025-06-27 15:04:17,093 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:04:17,094 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:04:17,094 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.0061, 0.3184, 1.2802, 1.1238, 2.8385, -0.5982, 2.9736, 3.1936, + 2.8499, -0.7896]) +2025-06-27 15:04:17,095 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 2.7120, -0.3803, 2.9076, 2.5315, 1.9188, -0.3108, 3.8506, 0.5791, + 2.8501, 0.2604]) +2025-06-27 15:04:17,095 - INFO - Batch.Pressure.shape: torch.Size([10, 1, 10000]) +2025-06-27 15:04:17,095 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:04:17,095 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:04:17,096 - INFO - The first 10 points pressure for the sample_0: tensor([-119.4090, -7.9263, -97.0791, -215.6410, -71.9722, -38.5220, + -76.4743, 1.0474, -67.6804, 296.0490]) +2025-06-27 15:04:17,096 - INFO - The first 10 points pressure for the sample_1: tensor([-112.9390, -178.6530, -134.2990, -31.5704, -71.9506, -192.3010, + -16.1056, 163.5570, 
-113.4070, -174.5600]) +2025-06-27 15:04:17,129 - INFO - Batch: 3 +2025-06-27 15:04:17,130 - INFO - Batch.points.shape: torch.Size([10, 1, 3, 10000]) +2025-06-27 15:04:17,130 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:04:17,130 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:04:17,131 - INFO - The first 10 points in x_coor for the sample_0: tensor([1.6354, 2.7358, 3.0108, 2.9425, 0.5582, 3.3426, 1.6927, 2.6554, 1.1885, + 2.9646]) +2025-06-27 15:04:17,131 - INFO - The first 10 points in x_coor for the sample_1: tensor([0.7073, 2.6755, 0.4456, 0.2009, 3.9422, 2.2771, 2.5980, 2.0823, 1.0969, + 0.8215]) +2025-06-27 15:04:17,131 - INFO - Batch.Pressure.shape: torch.Size([10, 1, 10000]) +2025-06-27 15:04:17,131 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:04:17,131 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:04:17,131 - INFO - The first 10 points pressure for the sample_0: tensor([ -53.3478, -249.1540, -217.1740, -38.3287, -57.3577, -39.5899, + -53.4930, -198.4300, -62.9154, -72.7477]) +2025-06-27 15:04:17,132 - INFO - The first 10 points pressure for the sample_1: tensor([ -69.7244, -171.5290, 77.3724, -124.6980, -16.5869, -37.1627, + -110.4390, -50.8352, -93.0625, 35.6096]) +2025-06-27 15:04:17,166 - INFO - Batch: 4 +2025-06-27 15:04:17,166 - INFO - Batch.points.shape: torch.Size([10, 1, 3, 10000]) +2025-06-27 15:04:17,166 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:04:17,166 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:04:17,167 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.7703, 3.6288, -0.0146, 0.1477, 2.8393, 2.8385, 1.3833, 3.0913, + 2.8498, -0.1425]) +2025-06-27 15:04:17,167 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.3972, 3.9183, 3.2692, 3.7785, -0.1782, 2.5976, -0.3481, 3.4233, + 3.6062, 0.2469]) +2025-06-27 15:04:17,168 - INFO - Batch.Pressure.shape: 
torch.Size([10, 1, 10000]) +2025-06-27 15:04:17,168 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:04:17,168 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:04:17,168 - INFO - The first 10 points pressure for the sample_0: tensor([ -83.3786, -22.5281, -175.6870, -139.1320, -77.1722, -71.0416, + -81.8121, -42.6166, -128.7640, -126.2550]) +2025-06-27 15:04:17,168 - INFO - The first 10 points pressure for the sample_1: tensor([-201.5640, -16.7136, -106.6490, -42.4794, -175.8160, -8.5266, + -134.5120, -136.8020, -58.7743, -206.3970]) +2025-06-27 15:04:17,201 - INFO - Batch: 5 +2025-06-27 15:04:17,202 - INFO - Batch.points.shape: torch.Size([10, 1, 3, 10000]) +2025-06-27 15:04:17,202 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:04:17,202 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:04:17,203 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 2.0024, 2.6824, -0.5860, 3.1021, 0.9962, -0.3762, 0.0654, -0.5734, + 0.3521, -0.9314]) +2025-06-27 15:04:17,203 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 1.0195, 3.0786, -0.9305, 3.6379, -0.0828, 1.6584, 0.9161, 0.9135, + -0.0261, 0.5813]) +2025-06-27 15:04:17,203 - INFO - Batch.Pressure.shape: torch.Size([10, 1, 10000]) +2025-06-27 15:04:17,203 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:04:17,203 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:04:17,204 - INFO - The first 10 points pressure for the sample_0: tensor([-8.8256e+01, -8.6504e+01, -3.2214e+02, -3.3256e+02, -5.7983e+02, + -8.2140e+01, -1.0871e+02, 4.1819e-02, -1.4809e+02, 2.5899e+02]) +2025-06-27 15:04:17,204 - INFO - The first 10 points pressure for the sample_1: tensor([ -91.5531, -88.4385, 420.0560, -11.4785, -138.2240, -229.8700, + -425.4430, -91.2196, -150.3880, -118.3980]) +2025-06-27 15:04:17,230 - INFO - Batch: 6 +2025-06-27 15:04:17,231 - INFO - Batch.points.shape: 
torch.Size([10, 1, 3, 10000]) +2025-06-27 15:04:17,231 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:04:17,231 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:04:17,232 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 0.9479, 0.3987, -0.1521, 1.9906, -0.0118, -0.5948, 2.6331, 0.5215, + 2.7021, 0.5607]) +2025-06-27 15:04:17,232 - INFO - The first 10 points in x_coor for the sample_1: tensor([-0.5999, 1.7842, 3.0147, 2.8585, 2.1510, 1.4178, -0.8385, 0.4896, + 3.3658, 2.4531]) +2025-06-27 15:04:17,232 - INFO - Batch.Pressure.shape: torch.Size([10, 1, 10000]) +2025-06-27 15:04:17,232 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) +2025-06-27 15:04:17,232 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:04:17,232 - INFO - The first 10 points pressure for the sample_0: tensor([ -50.8184, 107.2220, -172.7260, -58.7541, -100.8550, -93.4919, + -255.4690, -141.1370, -97.2591, 154.2220]) +2025-06-27 15:04:17,233 - INFO - The first 10 points pressure for the sample_1: tensor([ -92.1932, -75.5878, -113.2290, -145.7610, -83.5426, -248.8180, + 267.4870, -147.7300, -47.9468, -80.9790]) +2025-06-27 15:04:17,396 - INFO - Batch: 7 +2025-06-27 15:04:17,396 - INFO - Batch.points.shape: torch.Size([10, 1, 3, 10000]) +2025-06-27 15:04:17,396 - INFO - points_sample_0.shape: torch.Size([1, 3, 10000]) +2025-06-27 15:04:17,396 - INFO - points_sample_0.shape: torch.Size([3, 10000]) +2025-06-27 15:04:17,397 - INFO - The first 10 points in x_coor for the sample_0: tensor([ 1.8641, 0.8677, 1.8042, 0.1116, -0.8959, 3.5810, 0.5146, 3.5696, + 1.7614, 2.7114]) +2025-06-27 15:04:17,397 - INFO - The first 10 points in x_coor for the sample_1: tensor([ 0.0964, 2.4975, 2.4031, 1.5668, -0.8922, 3.7699, 1.2573, 0.2626, + 0.5347, -0.2470]) +2025-06-27 15:04:17,397 - INFO - Batch.Pressure.shape: torch.Size([10, 1, 10000]) +2025-06-27 15:04:17,397 - INFO - pressure_sample_0.shape: torch.Size([1, 10000]) 
+2025-06-27 15:04:17,397 - INFO - pressure_sample_0.shape: torch.Size([10000]) +2025-06-27 15:04:17,398 - INFO - The first 10 points pressure for the sample_0: tensor([-111.9880, -129.0600, -73.4888, -137.5430, 341.9980, -36.2054, + 107.2540, -18.7434, -79.0948, -57.4639]) +2025-06-27 15:04:17,398 - INFO - The first 10 points pressure for the sample_1: tensor([-118.6720, -88.8445, -147.5880, -248.5680, 302.8140, 0.5286, + -86.7859, -110.4070, 12.7982, -160.6780]) +2025-06-27 15:04:18,007 - INFO - Type of train_subset: +2025-06-27 15:04:18,008 - INFO - Number of samples of train_subset : 85 +2025-06-27 15:04:18,008 - INFO - Subset indices: [0, 1, 2, 4, 6] +2025-06-27 15:04:18,008 - INFO - List the train_subset vtk files: +2025-06-27 15:04:18,008 - INFO - 0: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_073.vtk +2025-06-27 15:04:18,008 - INFO - 1: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_051.vtk +2025-06-27 15:04:18,008 - INFO - 2: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_099.vtk +2025-06-27 15:04:18,008 - INFO - 3: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_164.vtk +2025-06-27 15:04:18,008 - INFO - 4: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_030.vtk +2025-06-27 15:04:18,008 - INFO - 5: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_132.vtk +2025-06-27 15:04:18,008 - INFO - 6: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_107.vtk +2025-06-27 15:04:18,008 - INFO - 7: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_186.vtk +2025-06-27 15:04:18,008 - INFO - 8: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_079.vtk +2025-06-27 15:04:18,008 - INFO - 9: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_018.vtk +2025-06-27 15:04:18,009 - INFO - 10: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_015.vtk +2025-06-27 15:04:18,009 - INFO - 11: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_191.vtk +2025-06-27 15:04:18,009 - INFO - 12: 
/work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_084.vtk +2025-06-27 15:04:18,009 - INFO - 13: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_129.vtk +2025-06-27 15:04:18,009 - INFO - 14: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_080.vtk +2025-06-27 15:04:18,009 - INFO - 15: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_010.vtk +2025-06-27 15:04:18,009 - INFO - 16: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_094.vtk +2025-06-27 15:04:18,009 - INFO - 17: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_035.vtk +2025-06-27 15:04:18,009 - INFO - 18: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_003.vtk +2025-06-27 15:04:18,009 - INFO - 19: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_142.vtk +2025-06-27 15:04:18,009 - INFO - 20: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_005.vtk +2025-06-27 15:04:18,009 - INFO - 21: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_024.vtk +2025-06-27 15:04:18,009 - INFO - 22: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_106.vtk +2025-06-27 15:04:18,009 - INFO - 23: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_161.vtk +2025-06-27 15:04:18,009 - INFO - 24: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_171.vtk +2025-06-27 15:04:18,009 - INFO - 25: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_156.vtk +2025-06-27 15:04:18,009 - INFO - 26: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_053.vtk +2025-06-27 15:04:18,009 - INFO - 27: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_059.vtk +2025-06-27 15:04:18,009 - INFO - 28: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_082.vtk +2025-06-27 15:04:18,009 - INFO - 29: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_109.vtk +2025-06-27 15:04:18,009 - INFO - 30: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_139.vtk +2025-06-27 15:04:18,009 - INFO - 31: 
/work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_055.vtk +2025-06-27 15:04:18,009 - INFO - 32: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_088.vtk +2025-06-27 15:04:18,009 - INFO - 33: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_174.vtk +2025-06-27 15:04:18,009 - INFO - 34: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_104.vtk +2025-06-27 15:04:18,009 - INFO - 35: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_014.vtk +2025-06-27 15:04:18,009 - INFO - 36: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_072.vtk +2025-06-27 15:04:18,009 - INFO - 37: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_146.vtk +2025-06-27 15:04:18,010 - INFO - 38: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_194.vtk +2025-06-27 15:04:18,010 - INFO - 39: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_066.vtk +2025-06-27 15:04:18,010 - INFO - 40: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_067.vtk +2025-06-27 15:04:18,010 - INFO - 41: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_130.vtk +2025-06-27 15:04:18,010 - INFO - 42: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_131.vtk +2025-06-27 15:04:18,010 - INFO - 43: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_182.vtk +2025-06-27 15:04:18,010 - INFO - 44: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_049.vtk +2025-06-27 15:04:18,010 - INFO - 45: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_022.vtk +2025-06-27 15:04:18,010 - INFO - 46: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_042.vtk +2025-06-27 15:04:18,010 - INFO - 47: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_036.vtk +2025-06-27 15:04:18,010 - INFO - 48: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_137.vtk +2025-06-27 15:04:18,010 - INFO - 49: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_187.vtk +2025-06-27 15:04:18,010 - INFO - 50: 
/work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_195.vtk +2025-06-27 15:04:18,010 - INFO - 51: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_180.vtk +2025-06-27 15:04:18,010 - INFO - 52: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_119.vtk +2025-06-27 15:04:18,010 - INFO - 53: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_169.vtk +2025-06-27 15:04:18,010 - INFO - 54: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_092.vtk +2025-06-27 15:04:18,010 - INFO - 55: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_001.vtk +2025-06-27 15:04:18,010 - INFO - 56: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_021.vtk +2025-06-27 15:04:18,011 - INFO - 57: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_192.vtk +2025-06-27 15:04:18,011 - INFO - 58: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_100.vtk +2025-06-27 15:04:18,011 - INFO - 59: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_108.vtk +2025-06-27 15:04:18,011 - INFO - 60: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_093.vtk +2025-06-27 15:04:18,011 - INFO - 61: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_149.vtk +2025-06-27 15:04:18,011 - INFO - 62: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_060.vtk +2025-06-27 15:04:18,011 - INFO - 63: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_027.vtk +2025-06-27 15:04:18,011 - INFO - 64: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_061.vtk +2025-06-27 15:04:18,011 - INFO - 65: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_096.vtk +2025-06-27 15:04:18,011 - INFO - 66: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_039.vtk +2025-06-27 15:04:18,011 - INFO - 67: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_118.vtk +2025-06-27 15:04:18,011 - INFO - 68: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_122.vtk +2025-06-27 15:04:18,011 - INFO - 69: 
/work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_153.vtk +2025-06-27 15:04:18,011 - INFO - 70: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_144.vtk +2025-06-27 15:04:18,011 - INFO - 71: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_138.vtk +2025-06-27 15:04:18,011 - INFO - 72: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_068.vtk +2025-06-27 15:04:18,011 - INFO - 73: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_115.vtk +2025-06-27 15:04:18,011 - INFO - 74: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_028.vtk +2025-06-27 15:04:18,011 - INFO - 75: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_136.vtk +2025-06-27 15:04:18,011 - INFO - 76: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_085.vtk +2025-06-27 15:04:18,011 - INFO - 77: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_097.vtk +2025-06-27 15:04:18,011 - INFO - 78: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_140.vtk +2025-06-27 15:04:18,011 - INFO - 79: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_190.vtk +2025-06-27 15:04:18,011 - INFO - 80: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_150.vtk +2025-06-27 15:04:18,011 - INFO - 81: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_170.vtk +2025-06-27 15:04:18,011 - INFO - 82: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_095.vtk +2025-06-27 15:04:18,011 - INFO - 83: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_071.vtk +2025-06-27 15:04:18,011 - INFO - 84: /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_155.vtk +2025-06-27 15:04:18,012 - INFO - Type of full_dataset: +2025-06-27 15:04:18,012 - INFO - Number of samples of full_dataset: 130 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_073.vtk: 0 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_051.vtk: 1 +2025-06-27 15:04:18,012 - INFO - 
/work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_099.vtk: 2 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_009.vtk: 3 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_164.vtk: 4 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_166.vtk: 5 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_030.vtk: 6 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_132.vtk: 7 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_114.vtk: 8 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_107.vtk: 9 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_186.vtk: 10 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_175.vtk: 11 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_079.vtk: 12 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_018.vtk: 13 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_015.vtk: 14 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_191.vtk: 15 +2025-06-27 15:04:18,012 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_084.vtk: 16 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_032.vtk: 17 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_129.vtk: 18 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_080.vtk: 19 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_010.vtk: 20 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_094.vtk: 21 
+2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_078.vtk: 22 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_035.vtk: 23 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_003.vtk: 24 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_020.vtk: 25 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_142.vtk: 26 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_005.vtk: 27 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_112.vtk: 28 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_125.vtk: 29 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_024.vtk: 30 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_106.vtk: 31 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_161.vtk: 32 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_171.vtk: 33 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_156.vtk: 34 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_053.vtk: 35 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_160.vtk: 36 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_059.vtk: 37 +2025-06-27 15:04:18,013 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_082.vtk: 38 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_109.vtk: 39 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_026.vtk: 40 +2025-06-27 15:04:18,014 - INFO - 
/work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_139.vtk: 41 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_055.vtk: 42 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_088.vtk: 43 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_174.vtk: 44 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_104.vtk: 45 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_168.vtk: 46 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_014.vtk: 47 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_072.vtk: 48 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_147.vtk: 49 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_146.vtk: 50 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_194.vtk: 51 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_141.vtk: 52 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_002.vtk: 53 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_066.vtk: 54 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_050.vtk: 55 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_067.vtk: 56 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_130.vtk: 57 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_025.vtk: 58 +2025-06-27 15:04:18,014 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_019.vtk: 59 +2025-06-27 15:04:18,014 - INFO - 
/work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_131.vtk: 60 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_182.vtk: 61 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_049.vtk: 62 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_022.vtk: 63 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_042.vtk: 64 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_177.vtk: 65 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_036.vtk: 66 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_137.vtk: 67 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_187.vtk: 68 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_195.vtk: 69 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_180.vtk: 70 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_178.vtk: 71 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_119.vtk: 72 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_169.vtk: 73 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_189.vtk: 74 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_092.vtk: 75 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_116.vtk: 76 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_001.vtk: 77 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_029.vtk: 78 +2025-06-27 15:04:18,015 - INFO - 
/work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_021.vtk: 79 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_192.vtk: 80 +2025-06-27 15:04:18,015 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_100.vtk: 81 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_087.vtk: 82 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_108.vtk: 83 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_093.vtk: 84 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_034.vtk: 85 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_159.vtk: 86 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_089.vtk: 87 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_065.vtk: 88 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_149.vtk: 89 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_075.vtk: 90 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_060.vtk: 91 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_148.vtk: 92 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_027.vtk: 93 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_061.vtk: 94 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_096.vtk: 95 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_039.vtk: 96 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_058.vtk: 97 +2025-06-27 15:04:18,016 - INFO - 
/work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_118.vtk: 98 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_091.vtk: 99 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_057.vtk: 100 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_122.vtk: 101 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_153.vtk: 102 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_144.vtk: 103 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_138.vtk: 104 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_068.vtk: 105 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_115.vtk: 106 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_135.vtk: 107 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_044.vtk: 108 +2025-06-27 15:04:18,016 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_028.vtk: 109 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_136.vtk: 110 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_085.vtk: 111 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_097.vtk: 112 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_121.vtk: 113 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_017.vtk: 114 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_140.vtk: 115 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_190.vtk: 116 +2025-06-27 15:04:18,017 - INFO - 
/work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_023.vtk: 117 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_047.vtk: 118 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_150.vtk: 119 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_070.vtk: 120 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_170.vtk: 121 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_110.vtk: 122 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_081.vtk: 123 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_172.vtk: 124 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_095.vtk: 125 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_071.vtk: 126 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_117.vtk: 127 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_155.vtk: 128 +2025-06-27 15:04:18,017 - INFO - /work/mae-zhangbj/Data_Pressure/Pressure_VTK/N_S_WWS_WM_043.vtk: 129 +2025-06-27 15:04:18,017 - INFO - Data loaded: 8 training batches, 2 validation batches, 2 test batches +2025-06-27 15:04:18,019 - INFO - Staring training for 10 epochs +2025-06-27 15:43:58,579 - INFO - args.exp_name : Train_Test +2025-06-27 15:43:58,580 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=10, epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 15:43:58,580 - INFO - 
Starting training with 1 GPUs +2025-06-27 15:44:01,742 - INFO - Total trainable parameters: 1437705 +2025-06-27 15:44:01,804 - INFO - Data loaded: 8 training batches, 2 validation batches, 2 test batches +2025-06-27 15:44:01,806 - INFO - Staring training for 10 epochs +2025-06-27 15:49:40,636 - INFO - args.exp_name : Train_Test +2025-06-27 15:49:40,637 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=16, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 15:49:40,637 - INFO - Starting training with 1 GPUs +2025-06-27 15:49:44,651 - INFO - Total trainable parameters: 1437705 +2025-06-27 15:49:44,720 - INFO - Data loaded: 5 training batches, 1 validation batches, 1 test batches +2025-06-27 15:49:44,727 - INFO - Staring training for 50 epochs +2025-06-27 17:44:02,011 - INFO - args.exp_name : Train_Test +2025-06-27 17:44:02,013 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=16, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 17:44:02,013 - INFO - Starting training with 1 GPUs +2025-06-27 17:44:04,501 - INFO - Total trainable parameters: 1437705 +2025-06-27 17:44:04,571 - INFO - Data loaded: 5 training batches, 1 validation batches, 1 test batches +2025-06-27 17:44:04,572 - INFO - Staring training for 50 epochs +2025-06-27 18:12:48,070 - INFO - args.exp_name : Train_Test +2025-06-27 18:12:48,070 - INFO - Arguments: Namespace(exp_name='Train_Test', 
seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=16, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 18:12:48,071 - INFO - Starting training with 1 GPUs +2025-06-27 18:12:51,413 - INFO - Total trainable parameters: 1437705 +2025-06-27 18:12:51,477 - INFO - Data loaded: 5 training batches, 1 validation batches, 1 test batches +2025-06-27 18:12:51,479 - INFO - Staring training for 50 epochs +2025-06-27 21:53:16,557 - INFO - args.exp_name : Train_Test +2025-06-27 21:53:16,558 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=16, epochs=50, lr=0.001, num_workers=2, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-27 21:53:16,558 - INFO - Starting training with 1 GPUs +2025-06-27 21:53:19,645 - INFO - Total trainable parameters: 1437705 +2025-06-27 21:53:19,698 - INFO - Data loaded: 5 training batches, 1 validation batches, 1 test batches +2025-06-27 21:53:19,700 - INFO - Staring training for 50 epochs +2025-06-28 11:02:55,132 - INFO - args.exp_name : Train_Test +2025-06-28 11:02:55,132 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=16, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 11:02:55,133 - INFO - Starting training with 1 
GPUs +2025-06-28 11:03:03,324 - INFO - Total trainable parameters: 1437705 +2025-06-28 11:03:03,418 - INFO - Data loaded: 5 training batches, 1 validation batches, 1 test batches +2025-06-28 11:03:03,420 - INFO - Staring training for 50 epochs +2025-06-28 11:06:12,746 - INFO - args.exp_name : Train_Test +2025-06-28 11:06:12,747 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=16, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 11:06:12,747 - INFO - Starting training with 1 GPUs +2025-06-28 11:06:16,128 - INFO - Total trainable parameters: 1437705 +2025-06-28 11:06:16,172 - INFO - Data loaded: 5 training batches, 1 validation batches, 1 test batches +2025-06-28 11:06:16,174 - INFO - Staring training for 50 epochs +2025-06-28 11:09:32,911 - INFO - args.exp_name : Train_Test +2025-06-28 11:09:32,912 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=16, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 11:09:32,912 - INFO - Starting training with 1 GPUs +2025-06-28 11:09:35,225 - INFO - Total trainable parameters: 1437705 +2025-06-28 11:09:35,267 - INFO - Data loaded: 5 training batches, 1 validation batches, 1 test batches +2025-06-28 11:09:35,269 - INFO - Staring training for 50 epochs +2025-06-28 11:18:10,980 - INFO - args.exp_name : Train_Test +2025-06-28 11:18:10,981 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, 
dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=16, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 11:18:10,981 - INFO - Starting training with 1 GPUs +2025-06-28 11:18:13,412 - INFO - Total trainable parameters: 1437705 +2025-06-28 11:18:13,459 - INFO - Data loaded: 5 training batches, 1 validation batches, 1 test batches +2025-06-28 11:18:13,461 - INFO - Staring training for 50 epochs +2025-06-28 11:22:54,036 - INFO - args.exp_name : Train_Test +2025-06-28 11:22:54,037 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=16, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 11:22:54,037 - INFO - Starting training with 1 GPUs +2025-06-28 11:22:56,720 - INFO - Total trainable parameters: 1437705 +2025-06-28 11:22:56,763 - INFO - Data loaded: 5 training batches, 1 validation batches, 1 test batches +2025-06-28 11:22:56,765 - INFO - Staring training for 50 epochs +2025-06-28 11:23:01,378 - INFO - outputs.shape: torch.Size([16, 1, 10000]) +2025-06-28 11:25:43,742 - INFO - args.exp_name : Train_Test +2025-06-28 11:25:43,743 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=16, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, 
output_channels=1) +2025-06-28 11:25:43,743 - INFO - Starting training with 1 GPUs +2025-06-28 11:25:46,092 - INFO - Total trainable parameters: 1437705 +2025-06-28 11:25:46,136 - INFO - Data loaded: 5 training batches, 1 validation batches, 1 test batches +2025-06-28 11:25:46,138 - INFO - Staring training for 50 epochs +2025-06-28 11:25:50,267 - INFO - outputs.shape: torch.Size([16, 1, 10000]) +2025-06-28 11:34:47,559 - INFO - args.exp_name : Train_Test +2025-06-28 11:34:47,564 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=16, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 11:34:47,564 - INFO - Starting training with 1 GPUs +2025-06-28 11:34:55,849 - INFO - Total trainable parameters: 1437705 +2025-06-28 11:34:55,909 - INFO - Data loaded: 5 training batches, 1 validation batches, 1 test batches +2025-06-28 11:34:55,914 - INFO - Staring training for 50 epochs +2025-06-28 11:35:08,690 - INFO - outputs.shape: torch.Size([16, 1, 10000]) +2025-06-28 11:39:15,633 - INFO - args.exp_name : Train_Test +2025-06-28 11:39:15,636 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=16, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 11:39:15,636 - INFO - Starting training with 1 GPUs +2025-06-28 11:39:21,776 - INFO - Total trainable parameters: 1437705 +2025-06-28 11:39:21,831 - INFO - Data loaded: 5 training batches, 1 validation batches, 1 test batches 
+2025-06-28 11:39:21,842 - INFO - Staring training for 50 epochs +2025-06-28 11:39:30,600 - INFO - outputs.shape: torch.Size([16, 1, 10000]) +2025-06-28 11:59:22,911 - INFO - args.exp_name : Train_Test +2025-06-28 11:59:22,913 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 11:59:22,913 - INFO - Starting training with 1 GPUs +2025-06-28 11:59:28,699 - INFO - Total trainable parameters: 1437705 +2025-06-28 11:59:28,758 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 11:59:28,764 - INFO - Staring training for 50 epochs +2025-06-28 11:59:37,355 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:37,400 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:37,638 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:37,887 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:38,132 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:38,377 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:38,624 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:38,868 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:39,111 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:39,356 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:44,646 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:44,906 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:45,150 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:45,395 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 
11:59:45,640 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:45,885 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:46,129 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:46,374 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:46,618 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:46,863 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:52,069 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:52,330 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:52,575 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:52,819 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:53,063 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:53,308 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:53,553 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:53,797 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:54,043 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:54,287 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:58,308 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:58,568 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:58,813 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:59,057 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:59,302 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:59,547 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 11:59:59,791 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:00,036 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:00,281 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:00,525 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:04,756 - INFO - outputs.shape: 
torch.Size([8, 1, 10000]) +2025-06-28 12:00:05,015 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:05,259 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:05,504 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:05,749 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:05,993 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:06,238 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:06,482 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:06,727 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:06,972 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:11,802 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:12,059 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:12,304 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:12,549 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:12,793 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:13,038 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:13,283 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:13,528 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:13,772 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:14,017 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:18,747 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:19,013 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:19,258 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:19,502 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:19,747 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:19,992 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:20,236 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 
12:00:20,481 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:20,725 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:20,970 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:23,928 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:24,198 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:24,443 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:24,687 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:24,932 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:25,177 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:25,421 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:25,666 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:25,911 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:26,155 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:29,211 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:29,476 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:29,722 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:29,966 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:30,211 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:30,455 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:30,700 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:30,945 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:31,189 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:31,434 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:36,549 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:36,810 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:37,055 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:37,300 - INFO - outputs.shape: 
torch.Size([8, 1, 10000]) +2025-06-28 12:00:37,545 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:37,789 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:38,034 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:38,279 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:38,523 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:38,768 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:43,714 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:43,977 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:44,222 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:44,466 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:44,711 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:44,956 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:45,200 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:45,445 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:45,689 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:45,934 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:50,584 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:50,844 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:51,089 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:51,333 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:51,578 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:51,822 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:52,067 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:52,312 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:52,557 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:52,801 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 
12:00:57,860 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:58,127 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:58,373 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:58,618 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:58,863 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:59,107 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:59,352 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:59,597 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:00:59,841 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:00,086 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:09,331 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:09,603 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:09,847 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:10,092 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:10,337 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:10,581 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:10,826 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:11,070 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:11,315 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:11,560 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:20,314 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:20,575 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:20,819 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:21,064 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:21,308 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:21,553 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:21,820 - INFO - outputs.shape: 
torch.Size([8, 1, 10000]) +2025-06-28 12:01:22,065 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:22,309 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:22,554 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:28,372 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:28,631 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:28,876 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:29,121 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:29,365 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:29,610 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:29,855 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:30,099 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:30,344 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:30,588 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:37,979 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:38,242 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:38,487 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:38,732 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:38,978 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:39,224 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:39,469 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:39,713 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:39,959 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:40,203 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:47,944 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:48,215 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:48,460 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 
12:01:48,704 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:48,949 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:49,194 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:49,438 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:49,683 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:49,927 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:50,172 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:57,069 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:57,335 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:57,580 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:57,825 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:58,070 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:58,315 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:58,565 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:58,806 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:59,049 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:01:59,293 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:06,431 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:06,701 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:06,948 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:07,191 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:07,437 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:07,680 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:07,925 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:08,169 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:08,414 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:08,659 - INFO - outputs.shape: 
torch.Size([8, 1, 10000]) +2025-06-28 12:02:17,114 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:17,381 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:17,625 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:17,870 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:18,114 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:18,359 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:18,604 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:18,849 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:19,095 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:19,338 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:23,749 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:24,015 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:24,260 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:24,504 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:24,749 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:24,994 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:25,238 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:25,483 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:25,727 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:25,972 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:30,140 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:30,410 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:30,655 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:30,900 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:31,144 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:31,389 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 
12:02:31,634 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:31,878 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:32,123 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:32,368 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:36,401 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:36,669 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:36,913 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:37,158 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:37,402 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:37,647 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:37,892 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:38,136 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:38,381 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:38,625 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:44,232 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:44,501 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:44,746 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:44,991 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:45,236 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:45,481 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:45,725 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:45,970 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:46,214 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:46,459 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:51,072 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:51,340 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:51,584 - INFO - outputs.shape: 
torch.Size([8, 1, 10000]) +2025-06-28 12:02:51,829 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:52,074 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:52,318 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:52,563 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:52,808 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:53,052 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:53,297 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:57,838 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:58,104 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:58,350 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:58,595 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:58,840 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:59,084 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:59,329 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:59,574 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:02:59,818 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:00,063 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:02,917 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:03,187 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:03,432 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:03,677 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:03,921 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:04,166 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:04,411 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:04,655 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:04,900 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 
12:03:05,145 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:08,462 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:08,720 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:08,965 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:09,210 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:09,454 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:09,699 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:09,943 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:10,188 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:10,433 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:10,677 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:15,261 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:15,523 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:15,767 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:16,011 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:16,256 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:16,501 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:16,745 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:16,990 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:17,235 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:17,479 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:20,558 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:20,825 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:21,071 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:21,315 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:21,560 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:21,805 - INFO - outputs.shape: 
torch.Size([8, 1, 10000]) +2025-06-28 12:03:22,049 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:22,294 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:22,538 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:22,783 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:26,025 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:26,295 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:26,540 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:26,784 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:27,029 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:27,274 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:27,519 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:27,763 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:28,008 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:28,253 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:33,128 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:33,392 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:33,636 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:33,881 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:34,125 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:34,370 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:34,615 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:34,859 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:35,104 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:35,348 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:39,987 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:40,249 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 
12:03:40,494 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:40,738 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:40,983 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:41,228 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:41,472 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:41,717 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:41,962 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:42,206 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:45,167 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:45,433 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:45,678 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:45,923 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:46,168 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:46,412 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:46,657 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:46,901 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:47,146 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:47,391 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:51,498 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:51,754 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:51,999 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:52,244 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:52,488 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:52,733 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:52,978 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:53,222 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:53,467 - INFO - outputs.shape: 
torch.Size([8, 1, 10000]) +2025-06-28 12:03:53,712 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:57,043 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:57,307 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:57,551 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:57,796 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:58,041 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:58,286 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:58,530 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:58,775 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:59,019 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:03:59,264 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:03,654 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:03,921 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:04,165 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:04,410 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:04,654 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:04,899 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:05,144 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:05,388 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:05,633 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:05,878 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:09,953 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:10,213 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:10,458 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:10,703 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:10,948 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 
12:04:11,192 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:11,437 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:11,682 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:11,926 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:12,171 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:15,099 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:15,367 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:15,611 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:15,856 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:16,101 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:16,346 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:16,590 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:16,835 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:17,080 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:17,325 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:22,045 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:22,302 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:22,546 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:22,791 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:23,036 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:23,280 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:23,525 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:23,769 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:24,014 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:24,259 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:27,576 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:27,842 - INFO - outputs.shape: 
torch.Size([8, 1, 10000]) +2025-06-28 12:04:28,087 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:28,332 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:28,576 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:28,821 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:29,066 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:29,310 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:29,555 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:29,799 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:32,816 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:33,082 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:33,327 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:33,571 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:33,816 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:34,061 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:34,305 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:34,550 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:34,795 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:35,039 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:38,476 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:38,739 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:38,984 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:39,228 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:39,473 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:39,718 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:39,962 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:40,207 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 
12:04:40,452 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:40,696 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:46,141 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:46,401 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:46,646 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:46,891 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:47,135 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:47,380 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:47,625 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:47,869 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:48,114 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:48,359 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:51,994 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:52,252 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:52,497 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:52,741 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:52,986 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:53,231 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:53,475 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:53,720 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:53,964 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:54,209 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:58,213 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:58,472 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:58,716 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:58,961 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:59,206 - INFO - outputs.shape: 
torch.Size([8, 1, 10000]) +2025-06-28 12:04:59,450 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:59,695 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:04:59,939 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:00,184 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:00,429 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:04,847 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:05,106 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:05,351 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:05,595 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:05,840 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:06,085 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:06,330 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:06,574 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:06,819 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:07,064 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:10,145 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:10,416 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:10,661 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:10,905 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:11,150 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:11,394 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:11,639 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:11,884 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:12,128 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:12,373 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:16,738 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 
12:05:16,997 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:17,243 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:17,488 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:17,732 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:17,976 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:18,221 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:18,466 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:18,711 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:05:18,955 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:35:43,879 - INFO - args.exp_name : Train_Test +2025-06-28 12:35:43,880 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 12:35:43,880 - INFO - Starting training with 1 GPUs +2025-06-28 12:35:49,317 - INFO - Total trainable parameters: 1437705 +2025-06-28 12:35:49,371 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 12:35:49,375 - INFO - Staring training for 50 epochs +2025-06-28 12:35:58,644 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:35:58,671 - INFO - loss type: +2025-06-28 12:35:59,024 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:35:59,024 - INFO - loss type: +2025-06-28 12:35:59,403 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:35:59,403 - INFO - loss type: +2025-06-28 12:35:59,783 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:35:59,783 - INFO - loss type: +2025-06-28 12:36:00,165 - INFO - outputs.shape: 
torch.Size([8, 1, 10000]) +2025-06-28 12:36:00,165 - INFO - loss type: +2025-06-28 12:36:00,549 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:00,549 - INFO - loss type: +2025-06-28 12:36:00,929 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:00,930 - INFO - loss type: +2025-06-28 12:36:01,331 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:01,331 - INFO - loss type: +2025-06-28 12:36:01,718 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:01,719 - INFO - loss type: +2025-06-28 12:36:02,102 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:02,102 - INFO - loss type: +2025-06-28 12:36:07,652 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:07,653 - INFO - loss type: +2025-06-28 12:36:08,048 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:08,048 - INFO - loss type: +2025-06-28 12:36:08,428 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:08,428 - INFO - loss type: +2025-06-28 12:36:08,808 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:08,808 - INFO - loss type: +2025-06-28 12:36:09,188 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:09,188 - INFO - loss type: +2025-06-28 12:36:09,568 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:09,568 - INFO - loss type: +2025-06-28 12:36:09,948 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:09,948 - INFO - loss type: +2025-06-28 12:36:10,328 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:10,328 - INFO - loss type: +2025-06-28 12:36:10,709 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:10,709 - INFO - loss type: +2025-06-28 12:36:11,089 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:11,090 - INFO - loss type: +2025-06-28 12:36:15,948 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:15,948 - INFO - loss type: 
+2025-06-28 12:36:16,352 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:16,352 - INFO - loss type: +2025-06-28 12:36:16,732 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:16,732 - INFO - loss type: +2025-06-28 12:36:17,112 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:17,112 - INFO - loss type: +2025-06-28 12:36:17,491 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:17,491 - INFO - loss type: +2025-06-28 12:36:17,872 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:17,873 - INFO - loss type: +2025-06-28 12:36:18,252 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:18,253 - INFO - loss type: +2025-06-28 12:36:18,632 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:18,632 - INFO - loss type: +2025-06-28 12:36:19,013 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:19,013 - INFO - loss type: +2025-06-28 12:36:19,393 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:19,394 - INFO - loss type: +2025-06-28 12:36:22,998 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:22,999 - INFO - loss type: +2025-06-28 12:36:23,407 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:23,407 - INFO - loss type: +2025-06-28 12:36:23,787 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:23,787 - INFO - loss type: +2025-06-28 12:36:24,167 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:24,167 - INFO - loss type: +2025-06-28 12:36:24,547 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:24,548 - INFO - loss type: +2025-06-28 12:36:24,927 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:24,927 - INFO - loss type: +2025-06-28 12:36:25,307 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:25,308 - INFO - loss type: +2025-06-28 12:36:25,688 - INFO - outputs.shape: torch.Size([8, 1, 10000]) 
+2025-06-28 12:36:25,688 - INFO - loss type: +2025-06-28 12:36:26,068 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:26,069 - INFO - loss type: +2025-06-28 12:36:26,449 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:26,449 - INFO - loss type: +2025-06-28 12:36:30,990 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:30,990 - INFO - loss type: +2025-06-28 12:36:31,384 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:31,384 - INFO - loss type: +2025-06-28 12:36:31,764 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:31,764 - INFO - loss type: +2025-06-28 12:36:32,144 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:32,144 - INFO - loss type: +2025-06-28 12:36:32,524 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:32,524 - INFO - loss type: +2025-06-28 12:36:32,904 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:32,904 - INFO - loss type: +2025-06-28 12:36:33,285 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:33,285 - INFO - loss type: +2025-06-28 12:36:33,665 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:33,665 - INFO - loss type: +2025-06-28 12:36:34,045 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:34,046 - INFO - loss type: +2025-06-28 12:36:34,425 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:34,425 - INFO - loss type: +2025-06-28 12:36:38,239 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:38,240 - INFO - loss type: +2025-06-28 12:36:38,631 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:38,631 - INFO - loss type: +2025-06-28 12:36:39,011 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:39,011 - INFO - loss type: +2025-06-28 12:36:39,393 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:39,393 - INFO - loss type: +2025-06-28 12:36:39,772 - INFO - 
outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:39,772 - INFO - loss type: +2025-06-28 12:36:40,152 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:40,153 - INFO - loss type: +2025-06-28 12:36:40,533 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:40,533 - INFO - loss type: +2025-06-28 12:36:40,913 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:40,913 - INFO - loss type: +2025-06-28 12:36:41,293 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:41,293 - INFO - loss type: +2025-06-28 12:36:41,673 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:41,674 - INFO - loss type: +2025-06-28 12:36:45,662 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:45,665 - INFO - loss type: +2025-06-28 12:36:46,057 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:46,057 - INFO - loss type: +2025-06-28 12:36:46,437 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:46,437 - INFO - loss type: +2025-06-28 12:36:46,817 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:46,817 - INFO - loss type: +2025-06-28 12:36:47,198 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:47,198 - INFO - loss type: +2025-06-28 12:36:47,578 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:47,578 - INFO - loss type: +2025-06-28 12:36:47,958 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:47,959 - INFO - loss type: +2025-06-28 12:36:48,339 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:48,339 - INFO - loss type: +2025-06-28 12:36:48,719 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:48,719 - INFO - loss type: +2025-06-28 12:36:49,099 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:49,099 - INFO - loss type: +2025-06-28 12:36:54,826 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:54,826 - INFO - loss 
type: +2025-06-28 12:36:55,227 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:55,228 - INFO - loss type: +2025-06-28 12:36:55,607 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:55,608 - INFO - loss type: +2025-06-28 12:36:55,987 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:55,988 - INFO - loss type: +2025-06-28 12:36:56,368 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:56,368 - INFO - loss type: +2025-06-28 12:36:56,748 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:56,748 - INFO - loss type: +2025-06-28 12:36:57,128 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:57,129 - INFO - loss type: +2025-06-28 12:36:57,509 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:57,522 - INFO - loss type: +2025-06-28 12:36:57,889 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:57,889 - INFO - loss type: +2025-06-28 12:36:58,269 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:36:58,269 - INFO - loss type: +2025-06-28 12:37:03,081 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:03,083 - INFO - loss type: +2025-06-28 12:37:03,481 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:03,481 - INFO - loss type: +2025-06-28 12:37:03,862 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:03,863 - INFO - loss type: +2025-06-28 12:37:04,242 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:04,242 - INFO - loss type: +2025-06-28 12:37:04,622 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:04,622 - INFO - loss type: +2025-06-28 12:37:05,002 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:05,002 - INFO - loss type: +2025-06-28 12:37:05,382 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:05,382 - INFO - loss type: +2025-06-28 12:37:05,762 - INFO - outputs.shape: torch.Size([8, 1, 
10000]) +2025-06-28 12:37:05,763 - INFO - loss type: +2025-06-28 12:37:06,142 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:06,142 - INFO - loss type: +2025-06-28 12:37:06,523 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:06,523 - INFO - loss type: +2025-06-28 12:37:14,332 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:14,334 - INFO - loss type: +2025-06-28 12:37:14,734 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:14,734 - INFO - loss type: +2025-06-28 12:37:15,114 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:15,114 - INFO - loss type: +2025-06-28 12:37:15,494 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:15,495 - INFO - loss type: +2025-06-28 12:37:15,875 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:15,875 - INFO - loss type: +2025-06-28 12:37:16,255 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:16,255 - INFO - loss type: +2025-06-28 12:37:16,635 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:16,635 - INFO - loss type: +2025-06-28 12:37:17,015 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:17,016 - INFO - loss type: +2025-06-28 12:37:17,395 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:17,396 - INFO - loss type: +2025-06-28 12:37:17,776 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:17,776 - INFO - loss type: +2025-06-28 12:37:23,342 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:23,345 - INFO - loss type: +2025-06-28 12:37:23,746 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:23,747 - INFO - loss type: +2025-06-28 12:37:24,127 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:24,127 - INFO - loss type: +2025-06-28 12:37:24,507 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:24,507 - INFO - loss type: +2025-06-28 12:37:24,887 - 
INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:24,888 - INFO - loss type: +2025-06-28 12:37:25,268 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:25,268 - INFO - loss type: +2025-06-28 12:37:25,648 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:25,649 - INFO - loss type: +2025-06-28 12:37:26,028 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:26,029 - INFO - loss type: +2025-06-28 12:37:26,408 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:26,409 - INFO - loss type: +2025-06-28 12:37:26,789 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:26,789 - INFO - loss type: +2025-06-28 12:37:30,373 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:30,378 - INFO - loss type: +2025-06-28 12:37:30,778 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:30,778 - INFO - loss type: +2025-06-28 12:37:31,159 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:31,159 - INFO - loss type: +2025-06-28 12:37:31,539 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:31,539 - INFO - loss type: +2025-06-28 12:37:31,919 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:31,919 - INFO - loss type: +2025-06-28 12:37:32,299 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:32,299 - INFO - loss type: +2025-06-28 12:37:32,681 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:32,681 - INFO - loss type: +2025-06-28 12:37:33,061 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:33,061 - INFO - loss type: +2025-06-28 12:37:33,441 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:33,441 - INFO - loss type: +2025-06-28 12:37:33,822 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:33,822 - INFO - loss type: +2025-06-28 12:37:37,620 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:37,620 - INFO 
- loss type: +2025-06-28 12:37:38,024 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:38,024 - INFO - loss type: +2025-06-28 12:37:38,404 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:38,404 - INFO - loss type: +2025-06-28 12:37:38,784 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:38,784 - INFO - loss type: +2025-06-28 12:37:39,165 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:39,165 - INFO - loss type: +2025-06-28 12:37:39,545 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:39,545 - INFO - loss type: +2025-06-28 12:37:39,925 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:39,925 - INFO - loss type: +2025-06-28 12:37:40,305 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:40,306 - INFO - loss type: +2025-06-28 12:37:40,686 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:40,686 - INFO - loss type: +2025-06-28 12:37:41,066 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:41,066 - INFO - loss type: +2025-06-28 12:37:48,497 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:48,499 - INFO - loss type: +2025-06-28 12:37:48,902 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:48,902 - INFO - loss type: +2025-06-28 12:37:49,282 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:49,282 - INFO - loss type: +2025-06-28 12:37:49,663 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:49,663 - INFO - loss type: +2025-06-28 12:37:50,043 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:50,043 - INFO - loss type: +2025-06-28 12:37:50,423 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:50,424 - INFO - loss type: +2025-06-28 12:37:50,803 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:50,804 - INFO - loss type: +2025-06-28 12:37:51,184 - INFO - outputs.shape: torch.Size([8, 1, 
10000]) +2025-06-28 12:37:51,185 - INFO - loss type: +2025-06-28 12:37:51,564 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:51,564 - INFO - loss type: +2025-06-28 12:37:51,944 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:51,944 - INFO - loss type: +2025-06-28 12:37:59,320 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:59,323 - INFO - loss type: +2025-06-28 12:37:59,725 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:37:59,725 - INFO - loss type: +2025-06-28 12:38:00,105 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:00,105 - INFO - loss type: +2025-06-28 12:38:00,485 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:00,486 - INFO - loss type: +2025-06-28 12:38:00,866 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:00,866 - INFO - loss type: +2025-06-28 12:38:01,246 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:01,246 - INFO - loss type: +2025-06-28 12:38:01,626 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:01,626 - INFO - loss type: +2025-06-28 12:38:02,006 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:02,006 - INFO - loss type: +2025-06-28 12:38:02,386 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:02,386 - INFO - loss type: +2025-06-28 12:38:02,767 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:02,767 - INFO - loss type: +2025-06-28 12:38:07,937 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:07,938 - INFO - loss type: +2025-06-28 12:38:08,347 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:08,348 - INFO - loss type: +2025-06-28 12:38:08,727 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:08,728 - INFO - loss type: +2025-06-28 12:38:09,110 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:09,110 - INFO - loss type: +2025-06-28 12:38:09,494 - 
INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:09,494 - INFO - loss type: +2025-06-28 12:38:09,878 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:09,879 - INFO - loss type: +2025-06-28 12:38:10,258 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:10,258 - INFO - loss type: +2025-06-28 12:38:10,640 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:10,640 - INFO - loss type: +2025-06-28 12:38:11,021 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:11,022 - INFO - loss type: +2025-06-28 12:38:11,401 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:11,401 - INFO - loss type: +2025-06-28 12:38:14,939 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:14,939 - INFO - loss type: +2025-06-28 12:38:15,343 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:15,344 - INFO - loss type: +2025-06-28 12:38:15,723 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:15,723 - INFO - loss type: +2025-06-28 12:38:16,103 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:16,104 - INFO - loss type: +2025-06-28 12:38:16,484 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:16,484 - INFO - loss type: +2025-06-28 12:38:16,864 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:16,864 - INFO - loss type: +2025-06-28 12:38:17,244 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:17,244 - INFO - loss type: +2025-06-28 12:38:17,625 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:17,625 - INFO - loss type: +2025-06-28 12:38:18,005 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:18,005 - INFO - loss type: +2025-06-28 12:38:18,385 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:18,385 - INFO - loss type: +2025-06-28 12:38:23,456 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:23,465 - INFO 
- loss type: +2025-06-28 12:38:23,858 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:23,859 - INFO - loss type: +2025-06-28 12:38:24,238 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:24,239 - INFO - loss type: +2025-06-28 12:38:24,618 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:24,619 - INFO - loss type: +2025-06-28 12:38:24,999 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:24,999 - INFO - loss type: +2025-06-28 12:38:25,379 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:25,379 - INFO - loss type: +2025-06-28 12:38:25,760 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:25,760 - INFO - loss type: +2025-06-28 12:38:26,139 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:26,140 - INFO - loss type: +2025-06-28 12:38:26,520 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:26,520 - INFO - loss type: +2025-06-28 12:38:26,900 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:26,900 - INFO - loss type: +2025-06-28 12:38:32,211 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:32,211 - INFO - loss type: +2025-06-28 12:38:32,604 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:32,605 - INFO - loss type: +2025-06-28 12:38:32,985 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:32,985 - INFO - loss type: +2025-06-28 12:38:33,365 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:33,365 - INFO - loss type: +2025-06-28 12:38:33,745 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:33,745 - INFO - loss type: +2025-06-28 12:38:34,126 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:34,126 - INFO - loss type: +2025-06-28 12:38:34,506 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:34,506 - INFO - loss type: +2025-06-28 12:38:34,886 - INFO - outputs.shape: torch.Size([8, 1, 
10000]) +2025-06-28 12:38:34,886 - INFO - loss type: +2025-06-28 12:38:35,266 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:35,266 - INFO - loss type: +2025-06-28 12:38:35,646 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:35,646 - INFO - loss type: +2025-06-28 12:38:40,838 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:40,838 - INFO - loss type: +2025-06-28 12:38:41,230 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:41,231 - INFO - loss type: +2025-06-28 12:38:41,611 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:41,611 - INFO - loss type: +2025-06-28 12:38:41,991 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:41,992 - INFO - loss type: +2025-06-28 12:38:42,371 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:42,372 - INFO - loss type: +2025-06-28 12:38:42,752 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:42,752 - INFO - loss type: +2025-06-28 12:38:43,132 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:43,132 - INFO - loss type: +2025-06-28 12:38:43,512 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:43,512 - INFO - loss type: +2025-06-28 12:38:43,892 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:43,892 - INFO - loss type: +2025-06-28 12:38:44,272 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:44,273 - INFO - loss type: +2025-06-28 12:38:49,446 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:49,448 - INFO - loss type: +2025-06-28 12:38:49,842 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:49,843 - INFO - loss type: +2025-06-28 12:38:50,223 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:50,223 - INFO - loss type: +2025-06-28 12:38:50,604 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:50,605 - INFO - loss type: +2025-06-28 12:38:50,984 - 
INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:50,984 - INFO - loss type: +2025-06-28 12:38:51,365 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:51,365 - INFO - loss type: +2025-06-28 12:38:51,745 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:51,745 - INFO - loss type: +2025-06-28 12:38:52,125 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:52,125 - INFO - loss type: +2025-06-28 12:38:52,505 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:52,506 - INFO - loss type: +2025-06-28 12:38:52,885 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:52,886 - INFO - loss type: +2025-06-28 12:38:56,431 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:56,431 - INFO - loss type: +2025-06-28 12:38:56,832 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:56,832 - INFO - loss type: +2025-06-28 12:38:57,212 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:57,212 - INFO - loss type: +2025-06-28 12:38:57,592 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:57,593 - INFO - loss type: +2025-06-28 12:38:57,973 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:57,973 - INFO - loss type: +2025-06-28 12:38:58,353 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:58,353 - INFO - loss type: +2025-06-28 12:38:58,733 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:58,733 - INFO - loss type: +2025-06-28 12:38:59,113 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:59,114 - INFO - loss type: +2025-06-28 12:38:59,493 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:59,494 - INFO - loss type: +2025-06-28 12:38:59,874 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:38:59,874 - INFO - loss type: +2025-06-28 12:39:04,843 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:04,843 - INFO 
- loss type: +2025-06-28 12:39:05,247 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:05,248 - INFO - loss type: +2025-06-28 12:39:05,627 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:05,628 - INFO - loss type: +2025-06-28 12:39:06,008 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:06,008 - INFO - loss type: +2025-06-28 12:39:06,388 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:06,388 - INFO - loss type: +2025-06-28 12:39:06,768 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:06,769 - INFO - loss type: +2025-06-28 12:39:07,148 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:07,149 - INFO - loss type: +2025-06-28 12:39:07,529 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:07,529 - INFO - loss type: +2025-06-28 12:39:07,909 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:07,909 - INFO - loss type: +2025-06-28 12:39:08,289 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:08,289 - INFO - loss type: +2025-06-28 12:39:13,667 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:13,669 - INFO - loss type: +2025-06-28 12:39:14,073 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:14,073 - INFO - loss type: +2025-06-28 12:39:14,453 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:14,453 - INFO - loss type: +2025-06-28 12:39:14,833 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:14,834 - INFO - loss type: +2025-06-28 12:39:15,213 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:15,214 - INFO - loss type: +2025-06-28 12:39:15,597 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:15,597 - INFO - loss type: +2025-06-28 12:39:15,977 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:15,978 - INFO - loss type: +2025-06-28 12:39:16,357 - INFO - outputs.shape: torch.Size([8, 1, 
10000]) +2025-06-28 12:39:16,358 - INFO - loss type: +2025-06-28 12:39:16,738 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:16,738 - INFO - loss type: +2025-06-28 12:39:17,118 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:17,118 - INFO - loss type: +2025-06-28 12:39:22,307 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:22,308 - INFO - loss type: +2025-06-28 12:39:22,713 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:22,713 - INFO - loss type: +2025-06-28 12:39:23,093 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:23,094 - INFO - loss type: +2025-06-28 12:39:23,474 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:23,474 - INFO - loss type: +2025-06-28 12:39:23,854 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:23,855 - INFO - loss type: +2025-06-28 12:39:24,234 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:24,235 - INFO - loss type: +2025-06-28 12:39:24,615 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:24,615 - INFO - loss type: +2025-06-28 12:39:24,996 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:24,997 - INFO - loss type: +2025-06-28 12:39:25,377 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:25,377 - INFO - loss type: +2025-06-28 12:39:25,756 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:25,757 - INFO - loss type: +2025-06-28 12:39:29,774 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:29,774 - INFO - loss type: +2025-06-28 12:39:30,177 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:30,178 - INFO - loss type: +2025-06-28 12:39:30,558 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:30,558 - INFO - loss type: +2025-06-28 12:39:30,939 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:30,939 - INFO - loss type: +2025-06-28 12:39:31,318 - 
INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:31,319 - INFO - loss type: +2025-06-28 12:39:31,698 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:31,699 - INFO - loss type: +2025-06-28 12:39:32,079 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:32,079 - INFO - loss type: +2025-06-28 12:39:32,459 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:32,459 - INFO - loss type: +2025-06-28 12:39:32,841 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:32,841 - INFO - loss type: +2025-06-28 12:39:33,221 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:33,221 - INFO - loss type: +2025-06-28 12:39:36,603 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:36,603 - INFO - loss type: +2025-06-28 12:39:37,007 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:37,007 - INFO - loss type: +2025-06-28 12:39:37,387 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:37,387 - INFO - loss type: +2025-06-28 12:39:37,767 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:37,767 - INFO - loss type: +2025-06-28 12:39:38,147 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:38,147 - INFO - loss type: +2025-06-28 12:39:38,529 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:38,529 - INFO - loss type: +2025-06-28 12:39:38,909 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:38,909 - INFO - loss type: +2025-06-28 12:39:39,289 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:39,289 - INFO - loss type: +2025-06-28 12:39:39,669 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:39,670 - INFO - loss type: +2025-06-28 12:39:40,049 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:40,050 - INFO - loss type: +2025-06-28 12:39:45,319 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:45,319 - INFO 
- loss type: +2025-06-28 12:39:45,716 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:45,716 - INFO - loss type: +2025-06-28 12:39:46,096 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:46,096 - INFO - loss type: +2025-06-28 12:39:46,476 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:46,476 - INFO - loss type: +2025-06-28 12:39:46,856 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:46,857 - INFO - loss type: +2025-06-28 12:39:47,237 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:47,237 - INFO - loss type: +2025-06-28 12:39:47,617 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:47,617 - INFO - loss type: +2025-06-28 12:39:47,997 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:47,997 - INFO - loss type: +2025-06-28 12:39:48,377 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:48,378 - INFO - loss type: +2025-06-28 12:39:48,758 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:48,758 - INFO - loss type: +2025-06-28 12:39:54,202 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:54,202 - INFO - loss type: +2025-06-28 12:39:54,595 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:54,595 - INFO - loss type: +2025-06-28 12:39:54,975 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:54,975 - INFO - loss type: +2025-06-28 12:39:55,355 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:55,358 - INFO - loss type: +2025-06-28 12:39:55,735 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:55,736 - INFO - loss type: +2025-06-28 12:39:56,116 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:56,116 - INFO - loss type: +2025-06-28 12:39:56,496 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:56,496 - INFO - loss type: +2025-06-28 12:39:56,876 - INFO - outputs.shape: torch.Size([8, 1, 
10000]) +2025-06-28 12:39:56,876 - INFO - loss type: +2025-06-28 12:39:57,256 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:57,257 - INFO - loss type: +2025-06-28 12:39:57,636 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:39:57,637 - INFO - loss type: +2025-06-28 12:40:02,399 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:02,399 - INFO - loss type: +2025-06-28 12:40:02,796 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:02,796 - INFO - loss type: +2025-06-28 12:40:03,176 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:03,176 - INFO - loss type: +2025-06-28 12:40:03,556 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:03,556 - INFO - loss type: +2025-06-28 12:40:03,937 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:03,937 - INFO - loss type: +2025-06-28 12:40:04,317 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:04,317 - INFO - loss type: +2025-06-28 12:40:04,697 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:04,697 - INFO - loss type: +2025-06-28 12:40:05,077 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:05,078 - INFO - loss type: +2025-06-28 12:40:05,458 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:05,458 - INFO - loss type: +2025-06-28 12:40:05,838 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:05,838 - INFO - loss type: +2025-06-28 12:40:11,059 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:11,060 - INFO - loss type: +2025-06-28 12:40:11,454 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:11,456 - INFO - loss type: +2025-06-28 12:40:11,834 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:11,834 - INFO - loss type: +2025-06-28 12:40:12,214 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:12,215 - INFO - loss type: +2025-06-28 12:40:12,595 - 
INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:12,595 - INFO - loss type: +2025-06-28 12:40:12,975 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:12,975 - INFO - loss type: +2025-06-28 12:40:13,355 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:13,356 - INFO - loss type: +2025-06-28 12:40:13,736 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:13,736 - INFO - loss type: +2025-06-28 12:40:14,116 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:14,116 - INFO - loss type: +2025-06-28 12:40:14,496 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:14,496 - INFO - loss type: +2025-06-28 12:40:17,943 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:17,943 - INFO - loss type: +2025-06-28 12:40:18,351 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:18,351 - INFO - loss type: +2025-06-28 12:40:18,731 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:18,731 - INFO - loss type: +2025-06-28 12:40:19,111 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:19,111 - INFO - loss type: +2025-06-28 12:40:19,491 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:19,492 - INFO - loss type: +2025-06-28 12:40:19,871 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:19,872 - INFO - loss type: +2025-06-28 12:40:20,251 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:20,252 - INFO - loss type: +2025-06-28 12:40:20,632 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:20,632 - INFO - loss type: +2025-06-28 12:40:21,013 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:21,014 - INFO - loss type: +2025-06-28 12:40:21,393 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:21,394 - INFO - loss type: +2025-06-28 12:40:24,969 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:24,969 - INFO 
- loss type: +2025-06-28 12:40:25,367 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:25,367 - INFO - loss type: +2025-06-28 12:40:25,747 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:25,747 - INFO - loss type: +2025-06-28 12:40:26,127 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:26,127 - INFO - loss type: +2025-06-28 12:40:26,507 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:26,507 - INFO - loss type: +2025-06-28 12:40:26,887 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:26,888 - INFO - loss type: +2025-06-28 12:40:27,268 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:27,268 - INFO - loss type: +2025-06-28 12:40:27,648 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:27,648 - INFO - loss type: +2025-06-28 12:40:28,028 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:28,028 - INFO - loss type: +2025-06-28 12:40:28,408 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:28,408 - INFO - loss type: +2025-06-28 12:40:33,738 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:33,738 - INFO - loss type: +2025-06-28 12:40:34,136 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:34,136 - INFO - loss type: +2025-06-28 12:40:34,517 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:34,517 - INFO - loss type: +2025-06-28 12:40:34,897 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:34,897 - INFO - loss type: +2025-06-28 12:40:35,277 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:35,277 - INFO - loss type: +2025-06-28 12:40:35,657 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:35,658 - INFO - loss type: +2025-06-28 12:40:36,038 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:36,038 - INFO - loss type: +2025-06-28 12:40:36,418 - INFO - outputs.shape: torch.Size([8, 1, 
10000]) +2025-06-28 12:40:36,418 - INFO - loss type: +2025-06-28 12:40:36,798 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:36,799 - INFO - loss type: +2025-06-28 12:40:37,178 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:37,178 - INFO - loss type: +2025-06-28 12:40:42,092 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:42,092 - INFO - loss type: +2025-06-28 12:40:42,498 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:42,498 - INFO - loss type: +2025-06-28 12:40:42,878 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:42,878 - INFO - loss type: +2025-06-28 12:40:43,258 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:43,258 - INFO - loss type: +2025-06-28 12:40:43,638 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:43,639 - INFO - loss type: +2025-06-28 12:40:44,019 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:44,019 - INFO - loss type: +2025-06-28 12:40:44,399 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:44,399 - INFO - loss type: +2025-06-28 12:40:44,779 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:44,779 - INFO - loss type: +2025-06-28 12:40:45,159 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:45,160 - INFO - loss type: +2025-06-28 12:40:45,539 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:45,540 - INFO - loss type: +2025-06-28 12:40:50,334 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:50,334 - INFO - loss type: +2025-06-28 12:40:50,739 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:50,740 - INFO - loss type: +2025-06-28 12:40:51,120 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:51,120 - INFO - loss type: +2025-06-28 12:40:51,500 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:51,500 - INFO - loss type: +2025-06-28 12:40:51,880 - 
INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:51,880 - INFO - loss type: +2025-06-28 12:40:52,260 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:52,260 - INFO - loss type: +2025-06-28 12:40:52,640 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:52,642 - INFO - loss type: +2025-06-28 12:40:53,020 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:53,021 - INFO - loss type: +2025-06-28 12:40:53,401 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:53,401 - INFO - loss type: +2025-06-28 12:40:53,781 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:53,782 - INFO - loss type: +2025-06-28 12:40:57,133 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:57,133 - INFO - loss type: +2025-06-28 12:40:57,539 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:57,539 - INFO - loss type: +2025-06-28 12:40:57,919 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:57,919 - INFO - loss type: +2025-06-28 12:40:58,299 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:58,299 - INFO - loss type: +2025-06-28 12:40:58,679 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:58,680 - INFO - loss type: +2025-06-28 12:40:59,059 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:59,060 - INFO - loss type: +2025-06-28 12:40:59,439 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:59,440 - INFO - loss type: +2025-06-28 12:40:59,819 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:40:59,820 - INFO - loss type: +2025-06-28 12:41:00,199 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:00,200 - INFO - loss type: +2025-06-28 12:41:00,580 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:00,580 - INFO - loss type: +2025-06-28 12:41:05,588 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:05,589 - INFO 
- loss type: +2025-06-28 12:41:05,996 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:05,996 - INFO - loss type: +2025-06-28 12:41:06,376 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:06,376 - INFO - loss type: +2025-06-28 12:41:06,756 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:06,757 - INFO - loss type: +2025-06-28 12:41:07,137 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:07,137 - INFO - loss type: +2025-06-28 12:41:07,517 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:07,517 - INFO - loss type: +2025-06-28 12:41:07,897 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:07,897 - INFO - loss type: +2025-06-28 12:41:08,277 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:08,277 - INFO - loss type: +2025-06-28 12:41:08,657 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:08,658 - INFO - loss type: +2025-06-28 12:41:09,038 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:09,038 - INFO - loss type: +2025-06-28 12:41:13,894 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:13,894 - INFO - loss type: +2025-06-28 12:41:14,296 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:14,297 - INFO - loss type: +2025-06-28 12:41:14,676 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:14,677 - INFO - loss type: +2025-06-28 12:41:15,056 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:15,057 - INFO - loss type: +2025-06-28 12:41:15,437 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:15,437 - INFO - loss type: +2025-06-28 12:41:15,817 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:15,817 - INFO - loss type: +2025-06-28 12:41:16,197 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:16,197 - INFO - loss type: +2025-06-28 12:41:16,577 - INFO - outputs.shape: torch.Size([8, 1, 
10000]) +2025-06-28 12:41:16,577 - INFO - loss type: +2025-06-28 12:41:16,957 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:16,957 - INFO - loss type: +2025-06-28 12:41:17,337 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:17,338 - INFO - loss type: +2025-06-28 12:41:21,878 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:21,878 - INFO - loss type: +2025-06-28 12:41:22,284 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:22,284 - INFO - loss type: +2025-06-28 12:41:22,664 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:22,665 - INFO - loss type: +2025-06-28 12:41:23,044 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:23,045 - INFO - loss type: +2025-06-28 12:41:23,425 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:23,425 - INFO - loss type: +2025-06-28 12:41:23,805 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:23,805 - INFO - loss type: +2025-06-28 12:41:24,185 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:24,185 - INFO - loss type: +2025-06-28 12:41:24,565 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:24,565 - INFO - loss type: +2025-06-28 12:41:24,945 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:24,946 - INFO - loss type: +2025-06-28 12:41:25,325 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:25,326 - INFO - loss type: +2025-06-28 12:41:29,711 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:29,711 - INFO - loss type: +2025-06-28 12:41:30,117 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:30,117 - INFO - loss type: +2025-06-28 12:41:30,497 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:30,498 - INFO - loss type: +2025-06-28 12:41:30,877 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:30,878 - INFO - loss type: +2025-06-28 12:41:31,257 - 
INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:31,258 - INFO - loss type: +2025-06-28 12:41:31,638 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:31,638 - INFO - loss type: +2025-06-28 12:41:32,018 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:32,018 - INFO - loss type: +2025-06-28 12:41:32,399 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:32,399 - INFO - loss type: +2025-06-28 12:41:32,779 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:32,779 - INFO - loss type: +2025-06-28 12:41:33,159 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:33,159 - INFO - loss type: +2025-06-28 12:41:36,530 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:36,530 - INFO - loss type: +2025-06-28 12:41:36,937 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:36,938 - INFO - loss type: +2025-06-28 12:41:37,318 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:37,318 - INFO - loss type: +2025-06-28 12:41:37,698 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:37,698 - INFO - loss type: +2025-06-28 12:41:38,078 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:38,078 - INFO - loss type: +2025-06-28 12:41:38,458 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:38,459 - INFO - loss type: +2025-06-28 12:41:38,838 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:38,839 - INFO - loss type: +2025-06-28 12:41:39,219 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:39,219 - INFO - loss type: +2025-06-28 12:41:39,599 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:39,599 - INFO - loss type: +2025-06-28 12:41:39,979 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:39,979 - INFO - loss type: +2025-06-28 12:41:44,476 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:44,476 - INFO 
- loss type: +2025-06-28 12:41:44,874 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:44,875 - INFO - loss type: +2025-06-28 12:41:45,254 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:45,255 - INFO - loss type: +2025-06-28 12:41:45,635 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:45,635 - INFO - loss type: +2025-06-28 12:41:46,015 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:46,015 - INFO - loss type: +2025-06-28 12:41:46,395 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:46,396 - INFO - loss type: +2025-06-28 12:41:46,776 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:46,778 - INFO - loss type: +2025-06-28 12:41:47,156 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:47,156 - INFO - loss type: +2025-06-28 12:41:47,536 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:47,536 - INFO - loss type: +2025-06-28 12:41:47,916 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:47,916 - INFO - loss type: +2025-06-28 12:41:52,382 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:52,383 - INFO - loss type: +2025-06-28 12:41:52,774 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:52,774 - INFO - loss type: +2025-06-28 12:41:53,154 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:53,154 - INFO - loss type: +2025-06-28 12:41:53,534 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:53,535 - INFO - loss type: +2025-06-28 12:41:53,915 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:53,915 - INFO - loss type: +2025-06-28 12:41:54,295 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:54,295 - INFO - loss type: +2025-06-28 12:41:54,675 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:54,675 - INFO - loss type: +2025-06-28 12:41:55,055 - INFO - outputs.shape: torch.Size([8, 1, 
10000]) +2025-06-28 12:41:55,056 - INFO - loss type: +2025-06-28 12:41:55,436 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:55,436 - INFO - loss type: +2025-06-28 12:41:55,816 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:41:55,816 - INFO - loss type: +2025-06-28 12:42:03,909 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:03,910 - INFO - loss type: +2025-06-28 12:42:04,316 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:04,316 - INFO - loss type: +2025-06-28 12:42:04,696 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:04,697 - INFO - loss type: +2025-06-28 12:42:05,079 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:05,079 - INFO - loss type: +2025-06-28 12:42:05,461 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:05,461 - INFO - loss type: +2025-06-28 12:42:05,841 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:05,842 - INFO - loss type: +2025-06-28 12:42:06,222 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:06,222 - INFO - loss type: +2025-06-28 12:42:06,620 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:06,620 - INFO - loss type: +2025-06-28 12:42:07,018 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:07,018 - INFO - loss type: +2025-06-28 12:42:07,414 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:07,414 - INFO - loss type: +2025-06-28 12:42:12,479 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:12,481 - INFO - loss type: +2025-06-28 12:42:12,871 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:12,871 - INFO - loss type: +2025-06-28 12:42:13,251 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:13,251 - INFO - loss type: +2025-06-28 12:42:13,631 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:13,632 - INFO - loss type: +2025-06-28 12:42:14,012 - 
INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:14,012 - INFO - loss type: +2025-06-28 12:42:14,392 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:14,393 - INFO - loss type: +2025-06-28 12:42:14,772 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:14,772 - INFO - loss type: +2025-06-28 12:42:15,152 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:15,152 - INFO - loss type: +2025-06-28 12:42:15,532 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:15,533 - INFO - loss type: +2025-06-28 12:42:15,913 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:15,913 - INFO - loss type: +2025-06-28 12:42:21,026 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:21,026 - INFO - loss type: +2025-06-28 12:42:21,427 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:21,427 - INFO - loss type: +2025-06-28 12:42:21,807 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:21,808 - INFO - loss type: +2025-06-28 12:42:22,188 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:22,188 - INFO - loss type: +2025-06-28 12:42:22,568 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:22,568 - INFO - loss type: +2025-06-28 12:42:22,948 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:22,948 - INFO - loss type: +2025-06-28 12:42:23,329 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:23,329 - INFO - loss type: +2025-06-28 12:42:23,709 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:23,709 - INFO - loss type: +2025-06-28 12:42:24,089 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:24,090 - INFO - loss type: +2025-06-28 12:42:24,469 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:24,470 - INFO - loss type: +2025-06-28 12:42:28,415 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:28,415 - INFO 
- loss type: +2025-06-28 12:42:28,821 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:28,821 - INFO - loss type: +2025-06-28 12:42:29,201 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:29,201 - INFO - loss type: +2025-06-28 12:42:29,581 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:29,582 - INFO - loss type: +2025-06-28 12:42:29,961 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:29,962 - INFO - loss type: +2025-06-28 12:42:30,342 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:30,342 - INFO - loss type: +2025-06-28 12:42:30,721 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:30,722 - INFO - loss type: +2025-06-28 12:42:31,103 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:31,103 - INFO - loss type: +2025-06-28 12:42:31,483 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:31,483 - INFO - loss type: +2025-06-28 12:42:31,863 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:31,863 - INFO - loss type: +2025-06-28 12:42:36,073 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:36,079 - INFO - loss type: +2025-06-28 12:42:36,477 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:36,477 - INFO - loss type: +2025-06-28 12:42:36,857 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:36,857 - INFO - loss type: +2025-06-28 12:42:37,237 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:37,237 - INFO - loss type: +2025-06-28 12:42:37,618 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:37,624 - INFO - loss type: +2025-06-28 12:42:37,999 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:37,999 - INFO - loss type: +2025-06-28 12:42:38,379 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:38,379 - INFO - loss type: +2025-06-28 12:42:38,759 - INFO - outputs.shape: torch.Size([8, 1, 
10000]) +2025-06-28 12:42:38,759 - INFO - loss type: +2025-06-28 12:42:39,139 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:39,140 - INFO - loss type: +2025-06-28 12:42:39,520 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:39,520 - INFO - loss type: +2025-06-28 12:42:43,699 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:43,699 - INFO - loss type: +2025-06-28 12:42:44,104 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:44,104 - INFO - loss type: +2025-06-28 12:42:44,485 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:44,485 - INFO - loss type: +2025-06-28 12:42:44,865 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:44,865 - INFO - loss type: +2025-06-28 12:42:45,245 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:45,245 - INFO - loss type: +2025-06-28 12:42:45,625 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:45,625 - INFO - loss type: +2025-06-28 12:42:46,005 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:46,006 - INFO - loss type: +2025-06-28 12:42:46,386 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:46,386 - INFO - loss type: +2025-06-28 12:42:46,766 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:46,767 - INFO - loss type: +2025-06-28 12:42:47,146 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 12:42:47,147 - INFO - loss type: +2025-06-28 13:00:25,137 - INFO - args.exp_name : Train_Test +2025-06-28 13:00:25,139 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 
13:00:25,139 - INFO - Starting training with 1 GPUs +2025-06-28 13:00:29,871 - INFO - Total trainable parameters: 1437705 +2025-06-28 13:00:29,924 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 13:00:29,928 - INFO - Staring training for 50 epochs +2025-06-28 13:00:37,426 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:37,426 - INFO - outputs type: +2025-06-28 13:00:37,791 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:37,791 - INFO - outputs type: +2025-06-28 13:00:38,171 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:38,171 - INFO - outputs type: +2025-06-28 13:00:38,551 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:38,551 - INFO - outputs type: +2025-06-28 13:00:38,932 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:38,932 - INFO - outputs type: +2025-06-28 13:00:39,312 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:39,313 - INFO - outputs type: +2025-06-28 13:00:39,693 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:39,693 - INFO - outputs type: +2025-06-28 13:00:40,073 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:40,074 - INFO - outputs type: +2025-06-28 13:00:40,453 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:40,453 - INFO - outputs type: +2025-06-28 13:00:40,835 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:40,835 - INFO - outputs type: +2025-06-28 13:00:47,607 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:47,610 - INFO - outputs type: +2025-06-28 13:00:48,003 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:48,003 - INFO - outputs type: +2025-06-28 13:00:48,383 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:48,383 - INFO - outputs type: +2025-06-28 13:00:48,763 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:48,763 - INFO - 
outputs type: +2025-06-28 13:00:49,143 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:49,143 - INFO - outputs type: +2025-06-28 13:00:49,523 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:49,523 - INFO - outputs type: +2025-06-28 13:00:49,903 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:49,904 - INFO - outputs type: +2025-06-28 13:00:50,284 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:50,284 - INFO - outputs type: +2025-06-28 13:00:50,664 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:50,664 - INFO - outputs type: +2025-06-28 13:00:51,044 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:51,044 - INFO - outputs type: +2025-06-28 13:00:56,456 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:56,458 - INFO - outputs type: +2025-06-28 13:00:56,852 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:56,852 - INFO - outputs type: +2025-06-28 13:00:57,232 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:57,232 - INFO - outputs type: +2025-06-28 13:00:57,612 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:57,612 - INFO - outputs type: +2025-06-28 13:00:57,992 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:57,992 - INFO - outputs type: +2025-06-28 13:00:58,372 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:58,373 - INFO - outputs type: +2025-06-28 13:00:58,754 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:58,754 - INFO - outputs type: +2025-06-28 13:00:59,134 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:59,134 - INFO - outputs type: +2025-06-28 13:00:59,514 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:59,514 - INFO - outputs type: +2025-06-28 13:00:59,894 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:00:59,894 - INFO - outputs type: +2025-06-28 
13:01:03,887 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:03,888 - INFO - outputs type: +2025-06-28 13:01:04,284 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:04,284 - INFO - outputs type: +2025-06-28 13:01:04,664 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:04,664 - INFO - outputs type: +2025-06-28 13:01:05,043 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:05,043 - INFO - outputs type: +2025-06-28 13:01:05,424 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:05,424 - INFO - outputs type: +2025-06-28 13:01:05,804 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:05,804 - INFO - outputs type: +2025-06-28 13:01:06,184 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:06,187 - INFO - outputs type: +2025-06-28 13:01:06,564 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:06,565 - INFO - outputs type: +2025-06-28 13:01:06,945 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:06,945 - INFO - outputs type: +2025-06-28 13:01:07,325 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:07,325 - INFO - outputs type: +2025-06-28 13:01:11,518 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:11,521 - INFO - outputs type: +2025-06-28 13:01:11,914 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:11,914 - INFO - outputs type: +2025-06-28 13:01:12,294 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:12,294 - INFO - outputs type: +2025-06-28 13:01:12,674 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:12,674 - INFO - outputs type: +2025-06-28 13:01:13,055 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:13,055 - INFO - outputs type: +2025-06-28 13:01:13,435 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:13,435 - INFO - outputs type: +2025-06-28 13:01:13,815 - INFO - 
outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:13,816 - INFO - outputs type: +2025-06-28 13:01:14,196 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:14,196 - INFO - outputs type: +2025-06-28 13:01:14,576 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:14,576 - INFO - outputs type: +2025-06-28 13:01:14,956 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:14,957 - INFO - outputs type: +2025-06-28 13:01:19,136 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:19,136 - INFO - outputs type: +2025-06-28 13:01:19,534 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:19,535 - INFO - outputs type: +2025-06-28 13:01:19,914 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:19,914 - INFO - outputs type: +2025-06-28 13:01:20,294 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:20,294 - INFO - outputs type: +2025-06-28 13:01:20,673 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:20,674 - INFO - outputs type: +2025-06-28 13:01:21,053 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:21,054 - INFO - outputs type: +2025-06-28 13:01:21,433 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:21,433 - INFO - outputs type: +2025-06-28 13:01:21,813 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:21,814 - INFO - outputs type: +2025-06-28 13:01:22,194 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:22,194 - INFO - outputs type: +2025-06-28 13:01:22,574 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:22,574 - INFO - outputs type: +2025-06-28 13:01:29,750 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:29,751 - INFO - outputs type: +2025-06-28 13:01:30,152 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:30,152 - INFO - outputs type: +2025-06-28 13:01:30,532 - INFO - outputs.shape: torch.Size([8, 
1, 10000]) +2025-06-28 13:01:30,532 - INFO - outputs type: +2025-06-28 13:01:30,912 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:30,912 - INFO - outputs type: +2025-06-28 13:01:31,292 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:31,292 - INFO - outputs type: +2025-06-28 13:01:31,672 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:31,673 - INFO - outputs type: +2025-06-28 13:01:32,053 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:32,053 - INFO - outputs type: +2025-06-28 13:01:32,433 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:32,434 - INFO - outputs type: +2025-06-28 13:01:32,813 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:32,814 - INFO - outputs type: +2025-06-28 13:01:33,194 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:33,194 - INFO - outputs type: +2025-06-28 13:01:37,464 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:37,470 - INFO - outputs type: +2025-06-28 13:01:37,859 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:37,859 - INFO - outputs type: +2025-06-28 13:01:38,239 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:38,239 - INFO - outputs type: +2025-06-28 13:01:38,619 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:38,619 - INFO - outputs type: +2025-06-28 13:01:38,999 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:38,999 - INFO - outputs type: +2025-06-28 13:01:39,380 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:39,380 - INFO - outputs type: +2025-06-28 13:01:39,760 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:39,761 - INFO - outputs type: +2025-06-28 13:01:40,141 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:40,141 - INFO - outputs type: +2025-06-28 13:01:40,521 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 
13:01:40,521 - INFO - outputs type: +2025-06-28 13:01:40,901 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:40,901 - INFO - outputs type: +2025-06-28 13:01:45,382 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:45,384 - INFO - outputs type: +2025-06-28 13:01:45,788 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:45,788 - INFO - outputs type: +2025-06-28 13:01:46,168 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:46,168 - INFO - outputs type: +2025-06-28 13:01:46,548 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:46,548 - INFO - outputs type: +2025-06-28 13:01:46,929 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:46,929 - INFO - outputs type: +2025-06-28 13:01:47,309 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:47,309 - INFO - outputs type: +2025-06-28 13:01:47,689 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:47,689 - INFO - outputs type: +2025-06-28 13:01:48,071 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:48,071 - INFO - outputs type: +2025-06-28 13:01:48,451 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:48,451 - INFO - outputs type: +2025-06-28 13:01:48,831 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:48,831 - INFO - outputs type: +2025-06-28 13:01:53,331 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:53,336 - INFO - outputs type: +2025-06-28 13:01:53,727 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:53,727 - INFO - outputs type: +2025-06-28 13:01:54,108 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:54,108 - INFO - outputs type: +2025-06-28 13:01:54,487 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:54,488 - INFO - outputs type: +2025-06-28 13:01:54,868 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:54,872 - INFO - outputs 
type: +2025-06-28 13:01:55,248 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:55,248 - INFO - outputs type: +2025-06-28 13:01:55,628 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:55,628 - INFO - outputs type: +2025-06-28 13:01:56,008 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:56,009 - INFO - outputs type: +2025-06-28 13:01:56,389 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:56,389 - INFO - outputs type: +2025-06-28 13:01:56,769 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:01:56,769 - INFO - outputs type: +2025-06-28 13:02:00,909 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:00,911 - INFO - outputs type: +2025-06-28 13:02:01,320 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:01,320 - INFO - outputs type: +2025-06-28 13:02:01,703 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:01,703 - INFO - outputs type: +2025-06-28 13:02:02,083 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:02,083 - INFO - outputs type: +2025-06-28 13:02:02,464 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:02,464 - INFO - outputs type: +2025-06-28 13:02:02,854 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:02,854 - INFO - outputs type: +2025-06-28 13:02:03,234 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:03,234 - INFO - outputs type: +2025-06-28 13:02:03,614 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:03,614 - INFO - outputs type: +2025-06-28 13:02:03,994 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:03,994 - INFO - outputs type: +2025-06-28 13:02:04,374 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:04,374 - INFO - outputs type: +2025-06-28 13:02:08,502 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:08,502 - INFO - outputs type: +2025-06-28 13:02:08,908 
- INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:08,908 - INFO - outputs type: +2025-06-28 13:02:09,288 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:09,288 - INFO - outputs type: +2025-06-28 13:02:09,669 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:09,669 - INFO - outputs type: +2025-06-28 13:02:10,049 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:10,049 - INFO - outputs type: +2025-06-28 13:02:10,429 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:10,429 - INFO - outputs type: +2025-06-28 13:02:10,809 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:10,809 - INFO - outputs type: +2025-06-28 13:02:11,190 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:11,190 - INFO - outputs type: +2025-06-28 13:02:11,570 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:11,570 - INFO - outputs type: +2025-06-28 13:02:11,950 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:11,950 - INFO - outputs type: +2025-06-28 13:02:16,308 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:16,308 - INFO - outputs type: +2025-06-28 13:02:16,716 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:16,716 - INFO - outputs type: +2025-06-28 13:02:17,096 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:17,096 - INFO - outputs type: +2025-06-28 13:02:17,476 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:17,477 - INFO - outputs type: +2025-06-28 13:02:17,858 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:17,858 - INFO - outputs type: +2025-06-28 13:02:18,238 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:18,238 - INFO - outputs type: +2025-06-28 13:02:18,618 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:18,618 - INFO - outputs type: +2025-06-28 13:02:18,999 - INFO - outputs.shape: 
torch.Size([8, 1, 10000]) +2025-06-28 13:02:18,999 - INFO - outputs type: +2025-06-28 13:02:19,379 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:19,379 - INFO - outputs type: +2025-06-28 13:02:19,759 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:19,759 - INFO - outputs type: +2025-06-28 13:02:23,983 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:23,985 - INFO - outputs type: +2025-06-28 13:02:24,388 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:24,388 - INFO - outputs type: +2025-06-28 13:02:24,768 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:24,768 - INFO - outputs type: +2025-06-28 13:02:25,149 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:25,149 - INFO - outputs type: +2025-06-28 13:02:25,529 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:25,529 - INFO - outputs type: +2025-06-28 13:02:25,909 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:25,909 - INFO - outputs type: +2025-06-28 13:02:26,289 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:26,289 - INFO - outputs type: +2025-06-28 13:02:26,670 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:26,670 - INFO - outputs type: +2025-06-28 13:02:27,050 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:27,050 - INFO - outputs type: +2025-06-28 13:02:27,430 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:27,430 - INFO - outputs type: +2025-06-28 13:02:35,795 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:35,798 - INFO - outputs type: +2025-06-28 13:02:36,206 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:36,206 - INFO - outputs type: +2025-06-28 13:02:36,586 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:36,586 - INFO - outputs type: +2025-06-28 13:02:36,966 - INFO - outputs.shape: torch.Size([8, 1, 10000]) 
+2025-06-28 13:02:36,966 - INFO - outputs type: +2025-06-28 13:02:37,347 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:37,347 - INFO - outputs type: +2025-06-28 13:02:37,726 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:37,727 - INFO - outputs type: +2025-06-28 13:02:38,107 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:38,107 - INFO - outputs type: +2025-06-28 13:02:38,487 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:38,487 - INFO - outputs type: +2025-06-28 13:02:38,867 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:38,867 - INFO - outputs type: +2025-06-28 13:02:39,247 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:39,247 - INFO - outputs type: +2025-06-28 13:02:45,734 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:45,735 - INFO - outputs type: +2025-06-28 13:02:46,134 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:46,134 - INFO - outputs type: +2025-06-28 13:02:46,514 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:46,514 - INFO - outputs type: +2025-06-28 13:02:46,895 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:46,895 - INFO - outputs type: +2025-06-28 13:02:47,275 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:47,275 - INFO - outputs type: +2025-06-28 13:02:47,655 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:47,655 - INFO - outputs type: +2025-06-28 13:02:48,036 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:48,036 - INFO - outputs type: +2025-06-28 13:02:48,416 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:48,416 - INFO - outputs type: +2025-06-28 13:02:48,796 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:48,796 - INFO - outputs type: +2025-06-28 13:02:49,176 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:49,176 - INFO 
- outputs type: +2025-06-28 13:02:53,783 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:53,783 - INFO - outputs type: +2025-06-28 13:02:54,181 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:54,181 - INFO - outputs type: +2025-06-28 13:02:54,562 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:54,562 - INFO - outputs type: +2025-06-28 13:02:54,942 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:54,942 - INFO - outputs type: +2025-06-28 13:02:55,322 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:55,322 - INFO - outputs type: +2025-06-28 13:02:55,704 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:55,704 - INFO - outputs type: +2025-06-28 13:02:56,084 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:56,084 - INFO - outputs type: +2025-06-28 13:02:56,464 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:56,464 - INFO - outputs type: +2025-06-28 13:02:56,844 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:56,844 - INFO - outputs type: +2025-06-28 13:02:57,225 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:02:57,225 - INFO - outputs type: +2025-06-28 13:03:01,436 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:01,436 - INFO - outputs type: +2025-06-28 13:03:01,835 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:01,835 - INFO - outputs type: +2025-06-28 13:03:02,215 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:02,215 - INFO - outputs type: +2025-06-28 13:03:02,595 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:02,595 - INFO - outputs type: +2025-06-28 13:03:02,976 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:02,976 - INFO - outputs type: +2025-06-28 13:03:03,355 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:03,355 - INFO - outputs type: +2025-06-28 
13:03:03,736 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:03,736 - INFO - outputs type: +2025-06-28 13:03:04,116 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:04,116 - INFO - outputs type: +2025-06-28 13:03:04,496 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:04,496 - INFO - outputs type: +2025-06-28 13:03:04,876 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:04,876 - INFO - outputs type: +2025-06-28 13:03:08,081 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:08,081 - INFO - outputs type: +2025-06-28 13:03:08,477 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:08,477 - INFO - outputs type: +2025-06-28 13:03:08,858 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:08,858 - INFO - outputs type: +2025-06-28 13:03:09,238 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:09,238 - INFO - outputs type: +2025-06-28 13:03:09,618 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:09,618 - INFO - outputs type: +2025-06-28 13:03:09,998 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:09,998 - INFO - outputs type: +2025-06-28 13:03:10,378 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:10,379 - INFO - outputs type: +2025-06-28 13:03:10,759 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:10,759 - INFO - outputs type: +2025-06-28 13:03:11,139 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:11,139 - INFO - outputs type: +2025-06-28 13:03:11,519 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:11,519 - INFO - outputs type: +2025-06-28 13:03:14,745 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:14,745 - INFO - outputs type: +2025-06-28 13:03:15,139 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:15,139 - INFO - outputs type: +2025-06-28 13:03:15,519 - INFO - 
outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:15,519 - INFO - outputs type: +2025-06-28 13:03:15,899 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:15,899 - INFO - outputs type: +2025-06-28 13:03:16,280 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:16,280 - INFO - outputs type: +2025-06-28 13:03:16,660 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:16,660 - INFO - outputs type: +2025-06-28 13:03:17,040 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:17,040 - INFO - outputs type: +2025-06-28 13:03:17,421 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:17,421 - INFO - outputs type: +2025-06-28 13:03:17,801 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:17,801 - INFO - outputs type: +2025-06-28 13:03:18,181 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:18,181 - INFO - outputs type: +2025-06-28 13:03:22,712 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:22,712 - INFO - outputs type: +2025-06-28 13:03:23,112 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:23,112 - INFO - outputs type: +2025-06-28 13:03:23,492 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:23,492 - INFO - outputs type: +2025-06-28 13:03:23,872 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:23,872 - INFO - outputs type: +2025-06-28 13:03:24,253 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:24,253 - INFO - outputs type: +2025-06-28 13:03:24,633 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:24,633 - INFO - outputs type: +2025-06-28 13:03:25,013 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:25,013 - INFO - outputs type: +2025-06-28 13:03:25,394 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:25,394 - INFO - outputs type: +2025-06-28 13:03:25,774 - INFO - outputs.shape: torch.Size([8, 
1, 10000]) +2025-06-28 13:03:25,774 - INFO - outputs type: +2025-06-28 13:03:26,154 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:26,155 - INFO - outputs type: +2025-06-28 13:03:30,035 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:30,035 - INFO - outputs type: +2025-06-28 13:03:30,430 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:30,430 - INFO - outputs type: +2025-06-28 13:03:30,810 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:30,810 - INFO - outputs type: +2025-06-28 13:03:31,191 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:31,191 - INFO - outputs type: +2025-06-28 13:03:31,571 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:31,572 - INFO - outputs type: +2025-06-28 13:03:31,952 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:31,952 - INFO - outputs type: +2025-06-28 13:03:32,332 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:32,332 - INFO - outputs type: +2025-06-28 13:03:32,712 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:32,712 - INFO - outputs type: +2025-06-28 13:03:33,092 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:33,092 - INFO - outputs type: +2025-06-28 13:03:33,472 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:33,472 - INFO - outputs type: +2025-06-28 13:03:37,977 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:37,977 - INFO - outputs type: +2025-06-28 13:03:38,379 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:38,379 - INFO - outputs type: +2025-06-28 13:03:38,759 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:38,759 - INFO - outputs type: +2025-06-28 13:03:39,139 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:39,139 - INFO - outputs type: +2025-06-28 13:03:39,520 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 
13:03:39,520 - INFO - outputs type: +2025-06-28 13:03:39,900 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:39,900 - INFO - outputs type: +2025-06-28 13:03:40,280 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:40,280 - INFO - outputs type: +2025-06-28 13:03:40,660 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:40,660 - INFO - outputs type: +2025-06-28 13:03:41,041 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:41,041 - INFO - outputs type: +2025-06-28 13:03:41,420 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:41,421 - INFO - outputs type: +2025-06-28 13:03:45,695 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:45,695 - INFO - outputs type: +2025-06-28 13:03:46,089 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:46,089 - INFO - outputs type: +2025-06-28 13:03:46,469 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:46,470 - INFO - outputs type: +2025-06-28 13:03:46,850 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:46,850 - INFO - outputs type: +2025-06-28 13:03:47,230 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:47,230 - INFO - outputs type: +2025-06-28 13:03:47,610 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:47,610 - INFO - outputs type: +2025-06-28 13:03:47,990 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:47,990 - INFO - outputs type: +2025-06-28 13:03:48,370 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:48,370 - INFO - outputs type: +2025-06-28 13:03:48,751 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:48,752 - INFO - outputs type: +2025-06-28 13:03:49,131 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:49,131 - INFO - outputs type: +2025-06-28 13:03:52,412 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:52,412 - INFO - outputs 
type: +2025-06-28 13:03:52,818 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:52,818 - INFO - outputs type: +2025-06-28 13:03:53,198 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:53,198 - INFO - outputs type: +2025-06-28 13:03:53,578 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:53,578 - INFO - outputs type: +2025-06-28 13:03:53,958 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:53,959 - INFO - outputs type: +2025-06-28 13:03:54,339 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:54,339 - INFO - outputs type: +2025-06-28 13:03:54,719 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:54,719 - INFO - outputs type: +2025-06-28 13:03:55,099 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:55,099 - INFO - outputs type: +2025-06-28 13:03:55,479 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:55,480 - INFO - outputs type: +2025-06-28 13:03:55,859 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:03:55,860 - INFO - outputs type: +2025-06-28 13:04:00,776 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:00,777 - INFO - outputs type: +2025-06-28 13:04:01,170 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:01,170 - INFO - outputs type: +2025-06-28 13:04:01,550 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:01,550 - INFO - outputs type: +2025-06-28 13:04:01,930 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:01,930 - INFO - outputs type: +2025-06-28 13:04:02,311 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:02,311 - INFO - outputs type: +2025-06-28 13:04:02,691 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:02,691 - INFO - outputs type: +2025-06-28 13:04:03,071 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:03,071 - INFO - outputs type: +2025-06-28 13:04:03,451 
- INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:03,451 - INFO - outputs type: +2025-06-28 13:04:03,831 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:03,831 - INFO - outputs type: +2025-06-28 13:04:04,212 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:04,212 - INFO - outputs type: +2025-06-28 13:04:07,740 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:07,740 - INFO - outputs type: +2025-06-28 13:04:08,139 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:08,139 - INFO - outputs type: +2025-06-28 13:04:08,519 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:08,519 - INFO - outputs type: +2025-06-28 13:04:08,899 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:08,899 - INFO - outputs type: +2025-06-28 13:04:09,280 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:09,280 - INFO - outputs type: +2025-06-28 13:04:09,660 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:09,660 - INFO - outputs type: +2025-06-28 13:04:10,040 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:10,041 - INFO - outputs type: +2025-06-28 13:04:10,421 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:10,433 - INFO - outputs type: +2025-06-28 13:04:10,801 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:10,801 - INFO - outputs type: +2025-06-28 13:04:11,181 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:11,181 - INFO - outputs type: +2025-06-28 13:04:14,896 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:14,896 - INFO - outputs type: +2025-06-28 13:04:15,303 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:15,303 - INFO - outputs type: +2025-06-28 13:04:15,684 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:15,684 - INFO - outputs type: +2025-06-28 13:04:16,064 - INFO - outputs.shape: 
torch.Size([8, 1, 10000]) +2025-06-28 13:04:16,064 - INFO - outputs type: +2025-06-28 13:04:16,444 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:16,444 - INFO - outputs type: +2025-06-28 13:04:16,824 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:16,824 - INFO - outputs type: +2025-06-28 13:04:17,205 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:17,205 - INFO - outputs type: +2025-06-28 13:04:17,585 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:17,585 - INFO - outputs type: +2025-06-28 13:04:17,965 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:17,965 - INFO - outputs type: +2025-06-28 13:04:18,345 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:18,345 - INFO - outputs type: +2025-06-28 13:04:37,631 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:37,639 - INFO - outputs type: +2025-06-28 13:04:38,035 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:38,035 - INFO - outputs type: +2025-06-28 13:04:38,417 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:38,418 - INFO - outputs type: +2025-06-28 13:04:38,800 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:38,800 - INFO - outputs type: +2025-06-28 13:04:39,182 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:39,182 - INFO - outputs type: +2025-06-28 13:04:39,562 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:39,563 - INFO - outputs type: +2025-06-28 13:04:39,942 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:39,943 - INFO - outputs type: +2025-06-28 13:04:40,325 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:40,325 - INFO - outputs type: +2025-06-28 13:04:40,707 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:40,707 - INFO - outputs type: +2025-06-28 13:04:41,095 - INFO - outputs.shape: torch.Size([8, 1, 10000]) 
+2025-06-28 13:04:41,095 - INFO - outputs type: +2025-06-28 13:04:48,409 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:48,409 - INFO - outputs type: +2025-06-28 13:04:48,804 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:48,804 - INFO - outputs type: +2025-06-28 13:04:49,184 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:49,184 - INFO - outputs type: +2025-06-28 13:04:49,564 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:49,564 - INFO - outputs type: +2025-06-28 13:04:49,945 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:49,945 - INFO - outputs type: +2025-06-28 13:04:50,325 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:50,325 - INFO - outputs type: +2025-06-28 13:04:50,705 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:50,706 - INFO - outputs type: +2025-06-28 13:04:51,086 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:51,086 - INFO - outputs type: +2025-06-28 13:04:51,466 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:51,466 - INFO - outputs type: +2025-06-28 13:04:51,846 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:51,846 - INFO - outputs type: +2025-06-28 13:04:56,005 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:56,005 - INFO - outputs type: +2025-06-28 13:04:56,400 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:56,401 - INFO - outputs type: +2025-06-28 13:04:56,780 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:56,781 - INFO - outputs type: +2025-06-28 13:04:57,161 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:57,161 - INFO - outputs type: +2025-06-28 13:04:57,541 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:57,541 - INFO - outputs type: +2025-06-28 13:04:57,921 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:57,921 - INFO 
- outputs type: +2025-06-28 13:04:58,301 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:58,302 - INFO - outputs type: +2025-06-28 13:04:58,682 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:58,682 - INFO - outputs type: +2025-06-28 13:04:59,062 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:59,062 - INFO - outputs type: +2025-06-28 13:04:59,442 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:04:59,442 - INFO - outputs type: +2025-06-28 13:05:03,168 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:03,168 - INFO - outputs type: +2025-06-28 13:05:03,570 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:03,571 - INFO - outputs type: +2025-06-28 13:05:03,951 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:03,951 - INFO - outputs type: +2025-06-28 13:05:04,331 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:04,331 - INFO - outputs type: +2025-06-28 13:05:04,711 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:04,711 - INFO - outputs type: +2025-06-28 13:05:05,091 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:05,091 - INFO - outputs type: +2025-06-28 13:05:05,472 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:05,472 - INFO - outputs type: +2025-06-28 13:05:05,852 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:05,852 - INFO - outputs type: +2025-06-28 13:05:06,233 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:06,233 - INFO - outputs type: +2025-06-28 13:05:06,613 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:06,613 - INFO - outputs type: +2025-06-28 13:05:10,615 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:10,615 - INFO - outputs type: +2025-06-28 13:05:11,016 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:11,017 - INFO - outputs type: +2025-06-28 
13:05:11,396 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:11,396 - INFO - outputs type: +2025-06-28 13:05:11,777 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:11,777 - INFO - outputs type: +2025-06-28 13:05:12,157 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:12,157 - INFO - outputs type: +2025-06-28 13:05:12,537 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:12,537 - INFO - outputs type: +2025-06-28 13:05:12,917 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:12,917 - INFO - outputs type: +2025-06-28 13:05:13,297 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:13,297 - INFO - outputs type: +2025-06-28 13:05:13,677 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:13,677 - INFO - outputs type: +2025-06-28 13:05:14,057 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:14,057 - INFO - outputs type: +2025-06-28 13:05:18,025 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:18,025 - INFO - outputs type: +2025-06-28 13:05:18,429 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:18,429 - INFO - outputs type: +2025-06-28 13:05:18,821 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:18,821 - INFO - outputs type: +2025-06-28 13:05:19,201 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:19,201 - INFO - outputs type: +2025-06-28 13:05:19,581 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:19,581 - INFO - outputs type: +2025-06-28 13:05:19,961 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:19,962 - INFO - outputs type: +2025-06-28 13:05:20,342 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:20,342 - INFO - outputs type: +2025-06-28 13:05:20,722 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:20,722 - INFO - outputs type: +2025-06-28 13:05:21,102 - INFO - 
outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:21,102 - INFO - outputs type: +2025-06-28 13:05:21,482 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:21,482 - INFO - outputs type: +2025-06-28 13:05:24,918 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:24,918 - INFO - outputs type: +2025-06-28 13:05:25,316 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:25,316 - INFO - outputs type: +2025-06-28 13:05:25,697 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:25,698 - INFO - outputs type: +2025-06-28 13:05:26,078 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:26,078 - INFO - outputs type: +2025-06-28 13:05:26,458 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:26,458 - INFO - outputs type: +2025-06-28 13:05:26,838 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:26,838 - INFO - outputs type: +2025-06-28 13:05:27,218 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:27,218 - INFO - outputs type: +2025-06-28 13:05:27,599 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:27,599 - INFO - outputs type: +2025-06-28 13:05:27,979 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:27,979 - INFO - outputs type: +2025-06-28 13:05:28,359 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:28,359 - INFO - outputs type: +2025-06-28 13:05:34,074 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:34,075 - INFO - outputs type: +2025-06-28 13:05:34,522 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:34,522 - INFO - outputs type: +2025-06-28 13:05:34,902 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:34,902 - INFO - outputs type: +2025-06-28 13:05:35,337 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:35,337 - INFO - outputs type: +2025-06-28 13:05:35,720 - INFO - outputs.shape: torch.Size([8, 
1, 10000]) +2025-06-28 13:05:35,720 - INFO - outputs type: +2025-06-28 13:05:36,108 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:36,108 - INFO - outputs type: +2025-06-28 13:05:36,513 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:36,513 - INFO - outputs type: +2025-06-28 13:05:36,895 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:36,895 - INFO - outputs type: +2025-06-28 13:05:37,276 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:37,276 - INFO - outputs type: +2025-06-28 13:05:37,698 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:37,698 - INFO - outputs type: +2025-06-28 13:05:41,903 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:41,903 - INFO - outputs type: +2025-06-28 13:05:42,310 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:42,311 - INFO - outputs type: +2025-06-28 13:05:42,691 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:42,691 - INFO - outputs type: +2025-06-28 13:05:43,071 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:43,071 - INFO - outputs type: +2025-06-28 13:05:43,451 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:43,451 - INFO - outputs type: +2025-06-28 13:05:43,831 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:43,831 - INFO - outputs type: +2025-06-28 13:05:44,212 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:44,212 - INFO - outputs type: +2025-06-28 13:05:44,592 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:44,592 - INFO - outputs type: +2025-06-28 13:05:44,973 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:44,973 - INFO - outputs type: +2025-06-28 13:05:45,353 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:45,353 - INFO - outputs type: +2025-06-28 13:05:49,667 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 
13:05:49,667 - INFO - outputs type: +2025-06-28 13:05:50,062 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:50,063 - INFO - outputs type: +2025-06-28 13:05:50,443 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:50,443 - INFO - outputs type: +2025-06-28 13:05:50,823 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:50,823 - INFO - outputs type: +2025-06-28 13:05:51,203 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:51,203 - INFO - outputs type: +2025-06-28 13:05:51,584 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:51,584 - INFO - outputs type: +2025-06-28 13:05:51,964 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:51,964 - INFO - outputs type: +2025-06-28 13:05:52,344 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:52,344 - INFO - outputs type: +2025-06-28 13:05:52,724 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:52,724 - INFO - outputs type: +2025-06-28 13:05:53,104 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:53,104 - INFO - outputs type: +2025-06-28 13:05:56,882 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:56,882 - INFO - outputs type: +2025-06-28 13:05:57,280 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:57,280 - INFO - outputs type: +2025-06-28 13:05:57,660 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:57,660 - INFO - outputs type: +2025-06-28 13:05:58,041 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:58,041 - INFO - outputs type: +2025-06-28 13:05:58,421 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:58,421 - INFO - outputs type: +2025-06-28 13:05:58,801 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:58,801 - INFO - outputs type: +2025-06-28 13:05:59,181 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:59,181 - INFO - outputs 
type: +2025-06-28 13:05:59,561 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:59,562 - INFO - outputs type: +2025-06-28 13:05:59,941 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:05:59,942 - INFO - outputs type: +2025-06-28 13:06:00,322 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:00,322 - INFO - outputs type: +2025-06-28 13:06:04,473 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:04,473 - INFO - outputs type: +2025-06-28 13:06:04,875 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:04,875 - INFO - outputs type: +2025-06-28 13:06:05,255 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:05,255 - INFO - outputs type: +2025-06-28 13:06:05,635 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:05,635 - INFO - outputs type: +2025-06-28 13:06:06,015 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:06,015 - INFO - outputs type: +2025-06-28 13:06:06,395 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:06,395 - INFO - outputs type: +2025-06-28 13:06:06,776 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:06,776 - INFO - outputs type: +2025-06-28 13:06:07,156 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:07,156 - INFO - outputs type: +2025-06-28 13:06:07,537 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:07,537 - INFO - outputs type: +2025-06-28 13:06:07,917 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:07,917 - INFO - outputs type: +2025-06-28 13:06:11,922 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:11,922 - INFO - outputs type: +2025-06-28 13:06:12,321 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:12,321 - INFO - outputs type: +2025-06-28 13:06:12,701 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:12,701 - INFO - outputs type: +2025-06-28 13:06:13,082 
- INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:13,082 - INFO - outputs type: +2025-06-28 13:06:13,462 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:13,462 - INFO - outputs type: +2025-06-28 13:06:13,842 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:13,842 - INFO - outputs type: +2025-06-28 13:06:14,223 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:14,223 - INFO - outputs type: +2025-06-28 13:06:14,603 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:14,603 - INFO - outputs type: +2025-06-28 13:06:14,984 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:14,984 - INFO - outputs type: +2025-06-28 13:06:15,364 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:15,365 - INFO - outputs type: +2025-06-28 13:06:18,924 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:18,924 - INFO - outputs type: +2025-06-28 13:06:19,328 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:19,328 - INFO - outputs type: +2025-06-28 13:06:19,708 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:19,708 - INFO - outputs type: +2025-06-28 13:06:20,088 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:20,088 - INFO - outputs type: +2025-06-28 13:06:20,468 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:20,469 - INFO - outputs type: +2025-06-28 13:06:20,849 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:20,849 - INFO - outputs type: +2025-06-28 13:06:21,229 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:21,229 - INFO - outputs type: +2025-06-28 13:06:21,609 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:21,609 - INFO - outputs type: +2025-06-28 13:06:21,989 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:21,989 - INFO - outputs type: +2025-06-28 13:06:22,369 - INFO - outputs.shape: 
torch.Size([8, 1, 10000]) +2025-06-28 13:06:22,369 - INFO - outputs type: +2025-06-28 13:06:26,039 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:26,039 - INFO - outputs type: +2025-06-28 13:06:26,435 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:26,435 - INFO - outputs type: +2025-06-28 13:06:26,815 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:26,815 - INFO - outputs type: +2025-06-28 13:06:27,195 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:27,195 - INFO - outputs type: +2025-06-28 13:06:27,575 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:27,575 - INFO - outputs type: +2025-06-28 13:06:27,955 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:27,955 - INFO - outputs type: +2025-06-28 13:06:28,337 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:28,337 - INFO - outputs type: +2025-06-28 13:06:28,717 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:28,717 - INFO - outputs type: +2025-06-28 13:06:29,097 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:29,097 - INFO - outputs type: +2025-06-28 13:06:29,477 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:29,477 - INFO - outputs type: +2025-06-28 13:06:32,773 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:32,773 - INFO - outputs type: +2025-06-28 13:06:33,178 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:33,178 - INFO - outputs type: +2025-06-28 13:06:33,558 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:33,559 - INFO - outputs type: +2025-06-28 13:06:33,952 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:33,952 - INFO - outputs type: +2025-06-28 13:06:34,332 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:34,333 - INFO - outputs type: +2025-06-28 13:06:34,712 - INFO - outputs.shape: torch.Size([8, 1, 10000]) 
+2025-06-28 13:06:34,713 - INFO - outputs type: +2025-06-28 13:06:35,093 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:35,093 - INFO - outputs type: +2025-06-28 13:06:35,473 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:35,473 - INFO - outputs type: +2025-06-28 13:06:35,853 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:35,853 - INFO - outputs type: +2025-06-28 13:06:36,233 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:36,233 - INFO - outputs type: +2025-06-28 13:06:39,529 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:39,530 - INFO - outputs type: +2025-06-28 13:06:39,922 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:39,922 - INFO - outputs type: +2025-06-28 13:06:40,302 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:40,302 - INFO - outputs type: +2025-06-28 13:06:40,682 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:40,683 - INFO - outputs type: +2025-06-28 13:06:41,063 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:41,063 - INFO - outputs type: +2025-06-28 13:06:41,443 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:41,443 - INFO - outputs type: +2025-06-28 13:06:41,823 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:41,823 - INFO - outputs type: +2025-06-28 13:06:42,203 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:42,204 - INFO - outputs type: +2025-06-28 13:06:42,584 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:42,584 - INFO - outputs type: +2025-06-28 13:06:42,964 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:42,964 - INFO - outputs type: +2025-06-28 13:06:49,804 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:49,804 - INFO - outputs type: +2025-06-28 13:06:50,203 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:50,203 - INFO 
- outputs type: +2025-06-28 13:06:50,584 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:50,584 - INFO - outputs type: +2025-06-28 13:06:50,964 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:50,964 - INFO - outputs type: +2025-06-28 13:06:51,344 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:51,344 - INFO - outputs type: +2025-06-28 13:06:51,724 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:51,724 - INFO - outputs type: +2025-06-28 13:06:52,104 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:52,104 - INFO - outputs type: +2025-06-28 13:06:52,498 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:52,498 - INFO - outputs type: +2025-06-28 13:06:52,878 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:52,878 - INFO - outputs type: +2025-06-28 13:06:53,257 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:53,258 - INFO - outputs type: +2025-06-28 13:06:57,665 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:57,665 - INFO - outputs type: +2025-06-28 13:06:58,073 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:58,073 - INFO - outputs type: +2025-06-28 13:06:58,453 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:58,453 - INFO - outputs type: +2025-06-28 13:06:58,833 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:58,833 - INFO - outputs type: +2025-06-28 13:06:59,213 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:59,213 - INFO - outputs type: +2025-06-28 13:06:59,593 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:59,594 - INFO - outputs type: +2025-06-28 13:06:59,974 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:06:59,974 - INFO - outputs type: +2025-06-28 13:07:00,354 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:00,354 - INFO - outputs type: +2025-06-28 
13:07:00,734 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:00,734 - INFO - outputs type: +2025-06-28 13:07:01,118 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:01,118 - INFO - outputs type: +2025-06-28 13:07:04,911 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:04,912 - INFO - outputs type: +2025-06-28 13:07:05,318 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:05,318 - INFO - outputs type: +2025-06-28 13:07:05,699 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:05,699 - INFO - outputs type: +2025-06-28 13:07:06,082 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:06,082 - INFO - outputs type: +2025-06-28 13:07:06,462 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:06,462 - INFO - outputs type: +2025-06-28 13:07:06,843 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:06,843 - INFO - outputs type: +2025-06-28 13:07:07,223 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:07,223 - INFO - outputs type: +2025-06-28 13:07:07,603 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:07,603 - INFO - outputs type: +2025-06-28 13:07:07,983 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:07,984 - INFO - outputs type: +2025-06-28 13:07:08,363 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:08,363 - INFO - outputs type: +2025-06-28 13:07:12,462 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:12,462 - INFO - outputs type: +2025-06-28 13:07:12,858 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:12,859 - INFO - outputs type: +2025-06-28 13:07:13,239 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:13,239 - INFO - outputs type: +2025-06-28 13:07:13,619 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:13,619 - INFO - outputs type: +2025-06-28 13:07:13,999 - INFO - 
outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:13,999 - INFO - outputs type: +2025-06-28 13:07:14,380 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:14,380 - INFO - outputs type: +2025-06-28 13:07:14,760 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:14,760 - INFO - outputs type: +2025-06-28 13:07:15,140 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:15,140 - INFO - outputs type: +2025-06-28 13:07:15,520 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:15,520 - INFO - outputs type: +2025-06-28 13:07:15,900 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:15,901 - INFO - outputs type: +2025-06-28 13:07:19,157 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:19,157 - INFO - outputs type: +2025-06-28 13:07:19,558 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:19,559 - INFO - outputs type: +2025-06-28 13:07:19,939 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:19,939 - INFO - outputs type: +2025-06-28 13:07:20,318 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:20,318 - INFO - outputs type: +2025-06-28 13:07:20,699 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:20,699 - INFO - outputs type: +2025-06-28 13:07:21,079 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:21,079 - INFO - outputs type: +2025-06-28 13:07:21,459 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:21,459 - INFO - outputs type: +2025-06-28 13:07:21,839 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:21,839 - INFO - outputs type: +2025-06-28 13:07:22,219 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:22,220 - INFO - outputs type: +2025-06-28 13:07:22,599 - INFO - outputs.shape: torch.Size([8, 1, 10000]) +2025-06-28 13:07:22,599 - INFO - outputs type: +2025-06-28 13:55:53,492 - INFO - args.exp_name : Train_Test 
+2025-06-28 13:55:53,494 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 13:55:53,494 - INFO - Starting training with 1 GPUs +2025-06-28 13:55:59,335 - INFO - Total trainable parameters: 1437705 +2025-06-28 13:55:59,394 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 13:55:59,399 - INFO - Staring training for 50 epochs +2025-06-28 13:56:11,867 - INFO - type of train_loss: +2025-06-28 14:00:45,985 - INFO - args.exp_name : Train_Test +2025-06-28 14:00:45,986 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 14:00:45,986 - INFO - Starting training with 1 GPUs +2025-06-28 14:00:50,736 - INFO - Total trainable parameters: 1437705 +2025-06-28 14:00:50,792 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 14:00:50,796 - INFO - Staring training for 50 epochs +2025-06-28 14:00:58,698 - INFO - type of total_loss: +2025-06-28 14:00:58,698 - INFO - total_loss value: 1.2830595970153809 +2025-06-28 14:00:58,698 - INFO - length of train_dataloader: 10 +2025-06-28 14:00:59,084 - INFO - type of total_loss: +2025-06-28 14:00:59,084 - INFO - total_loss value: 2.7383272647857666 +2025-06-28 14:00:59,084 - INFO - length of train_dataloader: 10 +2025-06-28 14:00:59,464 - INFO - type 
of total_loss: +2025-06-28 14:00:59,464 - INFO - total_loss value: 3.9416478872299194 +2025-06-28 14:00:59,464 - INFO - length of train_dataloader: 10 +2025-06-28 14:00:59,845 - INFO - type of total_loss: +2025-06-28 14:00:59,845 - INFO - total_loss value: 5.303300380706787 +2025-06-28 14:00:59,845 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:00,225 - INFO - type of total_loss: +2025-06-28 14:01:00,226 - INFO - total_loss value: 6.532232999801636 +2025-06-28 14:01:00,226 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:00,606 - INFO - type of total_loss: +2025-06-28 14:01:00,606 - INFO - total_loss value: 7.6904706954956055 +2025-06-28 14:01:00,606 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:00,986 - INFO - type of total_loss: +2025-06-28 14:01:00,986 - INFO - total_loss value: 8.918874979019165 +2025-06-28 14:01:00,986 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:01,370 - INFO - type of total_loss: +2025-06-28 14:01:01,370 - INFO - total_loss value: 10.084421396255493 +2025-06-28 14:01:01,370 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:01,750 - INFO - type of total_loss: +2025-06-28 14:01:01,750 - INFO - total_loss value: 11.2518949508667 +2025-06-28 14:01:01,751 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:02,131 - INFO - type of total_loss: +2025-06-28 14:01:02,131 - INFO - total_loss value: 12.53324019908905 +2025-06-28 14:01:02,131 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:07,628 - INFO - type of total_loss: +2025-06-28 14:01:07,629 - INFO - total_loss value: 1.1851657629013062 +2025-06-28 14:01:07,630 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:08,010 - INFO - type of total_loss: +2025-06-28 14:01:08,010 - INFO - total_loss value: 2.3786693811416626 +2025-06-28 14:01:08,010 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:08,391 - INFO - type of total_loss: +2025-06-28 14:01:08,391 - INFO - total_loss value: 3.5073537826538086 +2025-06-28 
14:01:08,391 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:08,771 - INFO - type of total_loss: +2025-06-28 14:01:08,771 - INFO - total_loss value: 4.666755557060242 +2025-06-28 14:01:08,771 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:09,152 - INFO - type of total_loss: +2025-06-28 14:01:09,152 - INFO - total_loss value: 5.821822285652161 +2025-06-28 14:01:09,152 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:09,532 - INFO - type of total_loss: +2025-06-28 14:01:09,532 - INFO - total_loss value: 7.113649845123291 +2025-06-28 14:01:09,532 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:09,912 - INFO - type of total_loss: +2025-06-28 14:01:09,913 - INFO - total_loss value: 8.341907858848572 +2025-06-28 14:01:09,913 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:10,293 - INFO - type of total_loss: +2025-06-28 14:01:10,293 - INFO - total_loss value: 9.507506847381592 +2025-06-28 14:01:10,293 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:10,673 - INFO - type of total_loss: +2025-06-28 14:01:10,674 - INFO - total_loss value: 10.727226853370667 +2025-06-28 14:01:10,674 - INFO - length of train_dataloader: 10 +2025-06-28 14:01:11,054 - INFO - type of total_loss: +2025-06-28 14:01:11,054 - INFO - total_loss value: 11.883892893791199 +2025-06-28 14:01:11,054 - INFO - length of train_dataloader: 10 +2025-06-28 14:30:54,618 - INFO - args.exp_name : Train_Test +2025-06-28 14:30:54,620 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 14:30:54,620 - INFO - Starting training with 1 GPUs +2025-06-28 14:30:59,877 - INFO - Total trainable parameters: 
1437705 +2025-06-28 14:30:59,930 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 14:30:59,934 - INFO - Staring training for 50 epochs +2025-06-28 14:31:13,391 - INFO - type of total_loss: +2025-06-28 14:31:13,391 - INFO - total_loss value: 12.53324019908905 +2025-06-28 14:31:13,391 - INFO - length of train_dataloader: 10 +2025-06-28 14:33:56,525 - INFO - args.exp_name : Train_Test +2025-06-28 14:33:56,527 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 14:33:56,527 - INFO - Starting training with 1 GPUs +2025-06-28 14:34:02,326 - INFO - Total trainable parameters: 1437705 +2025-06-28 14:34:02,391 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 14:34:02,395 - INFO - Staring training for 50 epochs +2025-06-28 14:34:14,519 - INFO - type of total_loss: +2025-06-28 14:34:14,519 - INFO - total_loss value: 12.53324019908905 +2025-06-28 14:34:14,519 - INFO - length of train_dataloader: 10 +2025-06-28 14:34:19,192 - INFO - Epoch 1/50 - Train Loss: 1.253324, Val Loss: 1.131323 +2025-06-28 14:34:26,002 - INFO - type of total_loss: +2025-06-28 14:34:26,002 - INFO - total_loss value: 11.883323550224304 +2025-06-28 14:34:26,003 - INFO - length of train_dataloader: 10 +2025-06-28 14:34:29,365 - INFO - Epoch 2/50 - Train Loss: 1.188332, Val Loss: 1.119962 +2025-06-28 14:34:36,476 - INFO - type of total_loss: +2025-06-28 14:34:36,476 - INFO - total_loss value: 11.823585867881775 +2025-06-28 14:34:36,476 - INFO - length of train_dataloader: 10 +2025-06-28 14:34:40,682 - INFO - Epoch 3/50 - Train Loss: 1.182359, Val Loss: 1.114902 
+2025-06-28 14:34:47,283 - INFO - type of total_loss: +2025-06-28 14:34:47,283 - INFO - total_loss value: 11.843237161636353 +2025-06-28 14:34:47,283 - INFO - length of train_dataloader: 10 +2025-06-28 14:34:50,649 - INFO - Epoch 4/50 - Train Loss: 1.184324, Val Loss: 1.109728 +2025-06-28 14:34:57,268 - INFO - type of total_loss: +2025-06-28 14:34:57,269 - INFO - total_loss value: 11.776811599731445 +2025-06-28 14:34:57,269 - INFO - length of train_dataloader: 10 +2025-06-28 14:35:02,517 - INFO - Epoch 5/50 - Train Loss: 1.177681, Val Loss: 1.104723 +2025-06-28 14:35:09,777 - INFO - type of total_loss: +2025-06-28 14:35:09,777 - INFO - total_loss value: 11.750238299369812 +2025-06-28 14:35:09,777 - INFO - length of train_dataloader: 10 +2025-06-28 14:35:13,971 - INFO - Epoch 6/50 - Train Loss: 1.175024, Val Loss: 1.096504 +2025-06-28 14:35:21,083 - INFO - type of total_loss: +2025-06-28 14:35:21,083 - INFO - total_loss value: 11.775049209594727 +2025-06-28 14:35:21,083 - INFO - length of train_dataloader: 10 +2025-06-28 14:35:24,974 - INFO - Epoch 7/50 - Train Loss: 1.177505, Val Loss: 1.082989 +2025-06-28 14:35:31,548 - INFO - type of total_loss: +2025-06-28 14:35:31,548 - INFO - total_loss value: 11.75777268409729 +2025-06-28 14:35:31,548 - INFO - length of train_dataloader: 10 +2025-06-28 14:35:35,718 - INFO - Epoch 8/50 - Train Loss: 1.175777, Val Loss: 1.058120 +2025-06-28 14:35:42,543 - INFO - type of total_loss: +2025-06-28 14:35:42,544 - INFO - total_loss value: 11.703246235847473 +2025-06-28 14:35:42,544 - INFO - length of train_dataloader: 10 +2025-06-28 14:35:45,945 - INFO - Epoch 9/50 - Train Loss: 1.170325, Val Loss: 1.028193 +2025-06-28 14:35:52,544 - INFO - type of total_loss: +2025-06-28 14:35:52,544 - INFO - total_loss value: 11.609472751617432 +2025-06-28 14:35:52,544 - INFO - length of train_dataloader: 10 +2025-06-28 14:35:56,999 - INFO - Epoch 10/50 - Train Loss: 1.160947, Val Loss: 1.010467 +2025-06-28 14:36:04,372 - INFO - type of total_loss: 
+2025-06-28 14:36:04,372 - INFO - total_loss value: 11.738473534584045 +2025-06-28 14:36:04,372 - INFO - length of train_dataloader: 10 +2025-06-28 14:36:07,761 - INFO - Epoch 11/50 - Train Loss: 1.173847, Val Loss: 0.980210 +2025-06-28 14:36:14,332 - INFO - type of total_loss: +2025-06-28 14:36:14,332 - INFO - total_loss value: 11.720927715301514 +2025-06-28 14:36:14,332 - INFO - length of train_dataloader: 10 +2025-06-28 14:36:17,747 - INFO - Epoch 12/50 - Train Loss: 1.172093, Val Loss: 1.007287 +2025-06-28 14:36:25,145 - INFO - type of total_loss: +2025-06-28 14:36:25,145 - INFO - total_loss value: 11.553749322891235 +2025-06-28 14:36:25,145 - INFO - length of train_dataloader: 10 +2025-06-28 14:36:31,979 - INFO - Epoch 13/50 - Train Loss: 1.155375, Val Loss: 0.980951 +2025-06-28 14:36:39,729 - INFO - type of total_loss: +2025-06-28 14:36:39,729 - INFO - total_loss value: 11.684824585914612 +2025-06-28 14:36:39,729 - INFO - length of train_dataloader: 10 +2025-06-28 14:36:43,199 - INFO - Epoch 14/50 - Train Loss: 1.168482, Val Loss: 0.981301 +2025-06-28 14:36:50,208 - INFO - type of total_loss: +2025-06-28 14:36:50,209 - INFO - total_loss value: 11.664612531661987 +2025-06-28 14:36:50,209 - INFO - length of train_dataloader: 10 +2025-06-28 14:36:53,733 - INFO - Epoch 15/50 - Train Loss: 1.166461, Val Loss: 0.970300 +2025-06-28 14:37:00,281 - INFO - type of total_loss: +2025-06-28 14:37:00,282 - INFO - total_loss value: 11.67478322982788 +2025-06-28 14:37:00,282 - INFO - length of train_dataloader: 10 +2025-06-28 14:37:04,576 - INFO - Epoch 16/50 - Train Loss: 1.167478, Val Loss: 0.976532 +2025-06-28 14:37:15,039 - INFO - type of total_loss: +2025-06-28 14:37:15,039 - INFO - total_loss value: 11.674216032028198 +2025-06-28 14:37:15,039 - INFO - length of train_dataloader: 10 +2025-06-28 14:37:19,651 - INFO - Epoch 17/50 - Train Loss: 1.167422, Val Loss: 0.979567 +2025-06-28 14:37:26,237 - INFO - type of total_loss: +2025-06-28 14:37:26,237 - INFO - total_loss 
value: 11.67242169380188 +2025-06-28 14:37:26,237 - INFO - length of train_dataloader: 10 +2025-06-28 14:37:30,199 - INFO - Epoch 18/50 - Train Loss: 1.167242, Val Loss: 0.970081 +2025-06-28 14:37:36,865 - INFO - type of total_loss: +2025-06-28 14:37:36,865 - INFO - total_loss value: 11.638634324073792 +2025-06-28 14:37:36,865 - INFO - length of train_dataloader: 10 +2025-06-28 14:37:40,164 - INFO - Epoch 19/50 - Train Loss: 1.163863, Val Loss: 0.967578 +2025-06-28 14:37:46,789 - INFO - type of total_loss: +2025-06-28 14:37:46,790 - INFO - total_loss value: 11.648188829421997 +2025-06-28 14:37:46,790 - INFO - length of train_dataloader: 10 +2025-06-28 14:37:51,610 - INFO - Epoch 20/50 - Train Loss: 1.164819, Val Loss: 0.955320 +2025-06-28 14:37:58,649 - INFO - type of total_loss: +2025-06-28 14:37:58,649 - INFO - total_loss value: 11.66547679901123 +2025-06-28 14:37:58,649 - INFO - length of train_dataloader: 10 +2025-06-28 14:38:02,507 - INFO - Epoch 21/50 - Train Loss: 1.166548, Val Loss: 0.952791 +2025-06-28 14:38:09,604 - INFO - type of total_loss: +2025-06-28 14:38:09,604 - INFO - total_loss value: 11.666450500488281 +2025-06-28 14:38:09,604 - INFO - length of train_dataloader: 10 +2025-06-28 14:38:12,946 - INFO - Epoch 22/50 - Train Loss: 1.166645, Val Loss: 0.953860 +2025-06-28 14:38:19,666 - INFO - type of total_loss: +2025-06-28 14:38:19,666 - INFO - total_loss value: 11.671009302139282 +2025-06-28 14:38:19,666 - INFO - length of train_dataloader: 10 +2025-06-28 14:38:24,317 - INFO - Epoch 23/50 - Train Loss: 1.167101, Val Loss: 0.951229 +2025-06-28 14:38:31,048 - INFO - type of total_loss: +2025-06-28 14:38:31,048 - INFO - total_loss value: 11.672417163848877 +2025-06-28 14:38:31,048 - INFO - length of train_dataloader: 10 +2025-06-28 14:38:34,499 - INFO - Epoch 24/50 - Train Loss: 1.167242, Val Loss: 0.957333 +2025-06-28 14:38:41,942 - INFO - type of total_loss: +2025-06-28 14:38:41,942 - INFO - total_loss value: 11.652331113815308 +2025-06-28 
14:38:41,942 - INFO - length of train_dataloader: 10 +2025-06-28 14:38:46,164 - INFO - Epoch 25/50 - Train Loss: 1.165233, Val Loss: 0.958300 +2025-06-28 14:38:52,785 - INFO - type of total_loss: +2025-06-28 14:38:52,785 - INFO - total_loss value: 11.66399097442627 +2025-06-28 14:38:52,786 - INFO - length of train_dataloader: 10 +2025-06-28 14:38:56,735 - INFO - Epoch 26/50 - Train Loss: 1.166399, Val Loss: 0.944509 +2025-06-28 14:39:04,158 - INFO - type of total_loss: +2025-06-28 14:39:04,158 - INFO - total_loss value: 11.5212641954422 +2025-06-28 14:39:04,158 - INFO - length of train_dataloader: 10 +2025-06-28 14:39:07,534 - INFO - Epoch 27/50 - Train Loss: 1.152126, Val Loss: 0.949081 +2025-06-28 14:39:14,132 - INFO - type of total_loss: +2025-06-28 14:39:14,132 - INFO - total_loss value: 11.659118175506592 +2025-06-28 14:39:14,133 - INFO - length of train_dataloader: 10 +2025-06-28 14:39:18,620 - INFO - Epoch 28/50 - Train Loss: 1.165912, Val Loss: 0.953573 +2025-06-28 14:39:26,011 - INFO - type of total_loss: +2025-06-28 14:39:26,011 - INFO - total_loss value: 11.656983256340027 +2025-06-28 14:39:26,011 - INFO - length of train_dataloader: 10 +2025-06-28 14:39:30,475 - INFO - Epoch 29/50 - Train Loss: 1.165698, Val Loss: 0.954406 +2025-06-28 14:39:37,252 - INFO - type of total_loss: +2025-06-28 14:39:37,252 - INFO - total_loss value: 11.692601919174194 +2025-06-28 14:39:37,252 - INFO - length of train_dataloader: 10 +2025-06-28 14:39:40,564 - INFO - Epoch 30/50 - Train Loss: 1.169260, Val Loss: 0.953179 +2025-06-28 14:39:47,800 - INFO - type of total_loss: +2025-06-28 14:39:47,800 - INFO - total_loss value: 11.533832788467407 +2025-06-28 14:39:47,800 - INFO - length of train_dataloader: 10 +2025-06-28 14:39:51,886 - INFO - Epoch 31/50 - Train Loss: 1.153383, Val Loss: 0.956231 +2025-06-28 14:39:58,888 - INFO - type of total_loss: +2025-06-28 14:39:58,888 - INFO - total_loss value: 11.666002988815308 +2025-06-28 14:39:58,888 - INFO - length of train_dataloader: 
10 +2025-06-28 14:40:02,782 - INFO - Epoch 32/50 - Train Loss: 1.166600, Val Loss: 0.951611 +2025-06-28 14:40:09,390 - INFO - type of total_loss: +2025-06-28 14:40:09,390 - INFO - total_loss value: 11.666587829589844 +2025-06-28 14:40:09,390 - INFO - length of train_dataloader: 10 +2025-06-28 14:40:10,448 - INFO - args.exp_name : Train_Test +2025-06-28 14:40:10,453 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 14:40:10,453 - INFO - Starting training with 1 GPUs +2025-06-28 14:40:13,091 - INFO - Total trainable parameters: 1437705 +2025-06-28 14:40:13,135 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 14:40:13,137 - INFO - Staring training for 50 epochs +2025-06-28 14:40:13,978 - INFO - Epoch 33/50 - Train Loss: 1.166659, Val Loss: 0.942014 +2025-06-28 14:40:21,253 - INFO - type of total_loss: +2025-06-28 14:40:21,253 - INFO - total_loss value: 11.67379629611969 +2025-06-28 14:40:21,253 - INFO - length of train_dataloader: 10 +2025-06-28 14:40:22,771 - INFO - type of total_loss: +2025-06-28 14:40:22,775 - INFO - total_loss value: 12.53324019908905 +2025-06-28 14:40:22,775 - INFO - length of train_dataloader: 10 +2025-06-28 14:40:24,855 - INFO - Epoch 34/50 - Train Loss: 1.167380, Val Loss: 0.929979 +2025-06-28 14:40:26,852 - INFO - Epoch 1/50 - Train Loss: 1.253324, Val Loss: 1.131323 +2025-06-28 14:40:26,884 - INFO - New best model saved with Val Loss: 1.131323 +2025-06-28 14:40:31,896 - INFO - type of total_loss: +2025-06-28 14:40:31,902 - INFO - total_loss value: 11.541240930557251 +2025-06-28 14:40:31,902 - INFO - length of train_dataloader: 10 
+2025-06-28 14:40:33,690 - INFO - type of total_loss: +2025-06-28 14:40:33,695 - INFO - total_loss value: 11.883323550224304 +2025-06-28 14:40:33,696 - INFO - length of train_dataloader: 10 +2025-06-28 14:40:36,540 - INFO - Epoch 35/50 - Train Loss: 1.154124, Val Loss: 0.935797 +2025-06-28 14:40:37,444 - INFO - Epoch 2/50 - Train Loss: 1.188332, Val Loss: 1.119962 +2025-06-28 14:40:37,480 - INFO - New best model saved with Val Loss: 1.119962 +2025-06-28 14:40:43,997 - INFO - type of total_loss: +2025-06-28 14:40:44,001 - INFO - total_loss value: 11.633399605751038 +2025-06-28 14:40:44,001 - INFO - length of train_dataloader: 10 +2025-06-28 14:40:44,234 - INFO - type of total_loss: +2025-06-28 14:40:44,255 - INFO - total_loss value: 11.823585867881775 +2025-06-28 14:40:44,255 - INFO - length of train_dataloader: 10 +2025-06-28 14:40:47,418 - INFO - Epoch 36/50 - Train Loss: 1.163340, Val Loss: 0.937645 +2025-06-28 14:40:47,933 - INFO - Epoch 3/50 - Train Loss: 1.182359, Val Loss: 1.114902 +2025-06-28 14:40:47,963 - INFO - New best model saved with Val Loss: 1.114902 +2025-06-28 14:40:54,034 - INFO - type of total_loss: +2025-06-28 14:40:54,041 - INFO - total_loss value: 11.65716278553009 +2025-06-28 14:40:54,041 - INFO - length of train_dataloader: 10 +2025-06-28 14:40:55,094 - INFO - type of total_loss: +2025-06-28 14:40:55,102 - INFO - total_loss value: 11.843237161636353 +2025-06-28 14:40:55,102 - INFO - length of train_dataloader: 10 +2025-06-28 14:40:57,826 - INFO - Epoch 37/50 - Train Loss: 1.165716, Val Loss: 0.934001 +2025-06-28 14:40:58,623 - INFO - Epoch 4/50 - Train Loss: 1.184324, Val Loss: 1.109728 +2025-06-28 14:40:58,656 - INFO - New best model saved with Val Loss: 1.109728 +2025-06-28 14:41:05,483 - INFO - type of total_loss: +2025-06-28 14:41:05,483 - INFO - total_loss value: 11.776811599731445 +2025-06-28 14:41:05,483 - INFO - length of train_dataloader: 10 +2025-06-28 14:41:05,583 - INFO - type of total_loss: +2025-06-28 14:41:05,589 - INFO - 
total_loss value: 11.701975703239441 +2025-06-28 14:41:05,589 - INFO - length of train_dataloader: 10 +2025-06-28 14:41:09,072 - INFO - Epoch 5/50 - Train Loss: 1.177681, Val Loss: 1.104723 +2025-06-28 14:41:09,101 - INFO - New best model saved with Val Loss: 1.104723 +2025-06-28 14:41:09,156 - INFO - Epoch 38/50 - Train Loss: 1.170198, Val Loss: 0.939264 +2025-06-28 14:41:15,892 - INFO - type of total_loss: +2025-06-28 14:41:15,892 - INFO - total_loss value: 11.589049220085144 +2025-06-28 14:41:15,892 - INFO - length of train_dataloader: 10 +2025-06-28 14:41:15,932 - INFO - type of total_loss: +2025-06-28 14:41:15,939 - INFO - total_loss value: 11.750238299369812 +2025-06-28 14:41:15,939 - INFO - length of train_dataloader: 10 +2025-06-28 14:41:19,456 - INFO - Epoch 6/50 - Train Loss: 1.175024, Val Loss: 1.096504 +2025-06-28 14:41:19,481 - INFO - New best model saved with Val Loss: 1.096504 +2025-06-28 14:41:20,368 - INFO - Epoch 39/50 - Train Loss: 1.158905, Val Loss: 0.932368 +2025-06-28 14:41:26,522 - INFO - type of total_loss: +2025-06-28 14:41:26,525 - INFO - total_loss value: 11.775049209594727 +2025-06-28 14:41:26,525 - INFO - length of train_dataloader: 10 +2025-06-28 14:41:27,718 - INFO - type of total_loss: +2025-06-28 14:41:27,724 - INFO - total_loss value: 11.677146792411804 +2025-06-28 14:41:27,724 - INFO - length of train_dataloader: 10 +2025-06-28 14:41:30,484 - INFO - Epoch 7/50 - Train Loss: 1.177505, Val Loss: 1.082989 +2025-06-28 14:41:30,514 - INFO - New best model saved with Val Loss: 1.082989 +2025-06-28 14:41:31,148 - INFO - Epoch 40/50 - Train Loss: 1.167715, Val Loss: 0.936677 +2025-06-28 14:41:37,328 - INFO - type of total_loss: +2025-06-28 14:41:37,331 - INFO - total_loss value: 11.75777268409729 +2025-06-28 14:41:37,331 - INFO - length of train_dataloader: 10 +2025-06-28 14:41:38,811 - INFO - type of total_loss: +2025-06-28 14:41:38,817 - INFO - total_loss value: 11.620135068893433 +2025-06-28 14:41:38,817 - INFO - length of 
train_dataloader: 10 +2025-06-28 14:41:41,042 - INFO - Epoch 8/50 - Train Loss: 1.175777, Val Loss: 1.058120 +2025-06-28 14:41:41,072 - INFO - New best model saved with Val Loss: 1.058120 +2025-06-28 14:41:43,222 - INFO - Epoch 41/50 - Train Loss: 1.162014, Val Loss: 0.933867 +2025-06-28 14:41:47,873 - INFO - type of total_loss: +2025-06-28 14:41:47,876 - INFO - total_loss value: 11.703246235847473 +2025-06-28 14:41:47,876 - INFO - length of train_dataloader: 10 +2025-06-28 14:41:50,492 - INFO - type of total_loss: +2025-06-28 14:41:50,498 - INFO - total_loss value: 11.648094654083252 +2025-06-28 14:41:50,498 - INFO - length of train_dataloader: 10 +2025-06-28 14:41:51,580 - INFO - Epoch 9/50 - Train Loss: 1.170325, Val Loss: 1.028193 +2025-06-28 14:41:51,609 - INFO - New best model saved with Val Loss: 1.028193 +2025-06-28 14:41:54,670 - INFO - Epoch 42/50 - Train Loss: 1.164809, Val Loss: 0.930149 +2025-06-28 14:41:58,580 - INFO - type of total_loss: +2025-06-28 14:41:58,583 - INFO - total_loss value: 11.609472751617432 +2025-06-28 14:41:58,583 - INFO - length of train_dataloader: 10 +2025-06-28 14:42:01,638 - INFO - type of total_loss: +2025-06-28 14:42:01,643 - INFO - total_loss value: 11.515152335166931 +2025-06-28 14:42:01,643 - INFO - length of train_dataloader: 10 +2025-06-28 14:42:02,316 - INFO - Epoch 10/50 - Train Loss: 1.160947, Val Loss: 1.010467 +2025-06-28 14:42:02,347 - INFO - New best model saved with Val Loss: 1.010467 +2025-06-28 14:42:04,976 - INFO - Epoch 43/50 - Train Loss: 1.151515, Val Loss: 0.921296 +2025-06-28 14:42:09,219 - INFO - type of total_loss: +2025-06-28 14:42:09,221 - INFO - total_loss value: 11.738473534584045 +2025-06-28 14:42:09,222 - INFO - length of train_dataloader: 10 +2025-06-28 14:42:12,746 - INFO - Epoch 11/50 - Train Loss: 1.173847, Val Loss: 0.980210 +2025-06-28 14:42:12,764 - INFO - type of total_loss: +2025-06-28 14:42:12,771 - INFO - total_loss value: 11.655946016311646 +2025-06-28 14:42:12,771 - INFO - length of 
train_dataloader: 10 +2025-06-28 14:42:12,771 - INFO - New best model saved with Val Loss: 0.980210 +2025-06-28 14:42:17,573 - INFO - Epoch 44/50 - Train Loss: 1.165595, Val Loss: 0.919199 +2025-06-28 14:42:19,616 - INFO - type of total_loss: +2025-06-28 14:42:19,624 - INFO - total_loss value: 11.720927715301514 +2025-06-28 14:42:19,624 - INFO - length of train_dataloader: 10 +2025-06-28 14:42:23,340 - INFO - Epoch 12/50 - Train Loss: 1.172093, Val Loss: 1.007287 +2025-06-28 14:42:29,255 - INFO - type of total_loss: +2025-06-28 14:42:29,259 - INFO - total_loss value: 11.615602850914001 +2025-06-28 14:42:29,259 - INFO - length of train_dataloader: 10 +2025-06-28 14:42:30,334 - INFO - type of total_loss: +2025-06-28 14:42:30,340 - INFO - total_loss value: 11.553749322891235 +2025-06-28 14:42:30,340 - INFO - length of train_dataloader: 10 +2025-06-28 14:42:33,872 - INFO - Epoch 13/50 - Train Loss: 1.155375, Val Loss: 0.980951 +2025-06-28 14:42:35,101 - INFO - Epoch 45/50 - Train Loss: 1.161560, Val Loss: 0.922296 +2025-06-28 14:42:40,791 - INFO - type of total_loss: +2025-06-28 14:42:40,795 - INFO - total_loss value: 11.684824585914612 +2025-06-28 14:42:40,795 - INFO - length of train_dataloader: 10 +2025-06-28 14:42:42,060 - INFO - type of total_loss: +2025-06-28 14:42:42,065 - INFO - total_loss value: 11.579261660575867 +2025-06-28 14:42:42,066 - INFO - length of train_dataloader: 10 +2025-06-28 14:42:44,550 - INFO - Epoch 14/50 - Train Loss: 1.168482, Val Loss: 0.981301 +2025-06-28 14:42:46,269 - INFO - Epoch 46/50 - Train Loss: 1.157926, Val Loss: 0.921191 +2025-06-28 14:42:51,411 - INFO - type of total_loss: +2025-06-28 14:42:51,414 - INFO - total_loss value: 11.664612531661987 +2025-06-28 14:42:51,414 - INFO - length of train_dataloader: 10 +2025-06-28 14:42:53,259 - INFO - type of total_loss: +2025-06-28 14:42:53,264 - INFO - total_loss value: 11.64852499961853 +2025-06-28 14:42:53,264 - INFO - length of train_dataloader: 10 +2025-06-28 14:42:55,156 - INFO - 
Epoch 15/50 - Train Loss: 1.166461, Val Loss: 0.970300 +2025-06-28 14:42:55,185 - INFO - New best model saved with Val Loss: 0.970300 +2025-06-28 14:42:56,548 - INFO - Epoch 47/50 - Train Loss: 1.164852, Val Loss: 0.932208 +2025-06-28 14:43:02,263 - INFO - type of total_loss: +2025-06-28 14:43:02,267 - INFO - total_loss value: 11.67478322982788 +2025-06-28 14:43:02,267 - INFO - length of train_dataloader: 10 +2025-06-28 14:43:05,724 - INFO - type of total_loss: +2025-06-28 14:43:05,733 - INFO - total_loss value: 11.659417510032654 +2025-06-28 14:43:05,733 - INFO - length of train_dataloader: 10 +2025-06-28 14:43:05,763 - INFO - Epoch 16/50 - Train Loss: 1.167478, Val Loss: 0.976532 +2025-06-28 14:43:11,421 - INFO - Epoch 48/50 - Train Loss: 1.165942, Val Loss: 0.941657 +2025-06-28 14:43:12,622 - INFO - type of total_loss: +2025-06-28 14:43:12,628 - INFO - total_loss value: 11.674216032028198 +2025-06-28 14:43:12,628 - INFO - length of train_dataloader: 10 +2025-06-28 14:43:16,383 - INFO - Epoch 17/50 - Train Loss: 1.167422, Val Loss: 0.979567 +2025-06-28 14:43:18,681 - INFO - type of total_loss: +2025-06-28 14:43:18,688 - INFO - total_loss value: 11.51139783859253 +2025-06-28 14:43:18,688 - INFO - length of train_dataloader: 10 +2025-06-28 14:43:22,166 - INFO - Epoch 49/50 - Train Loss: 1.151140, Val Loss: 0.926594 +2025-06-28 14:43:23,304 - INFO - type of total_loss: +2025-06-28 14:43:23,311 - INFO - total_loss value: 11.67242169380188 +2025-06-28 14:43:23,311 - INFO - length of train_dataloader: 10 +2025-06-28 14:43:26,987 - INFO - Epoch 18/50 - Train Loss: 1.167242, Val Loss: 0.970081 +2025-06-28 14:43:27,011 - INFO - New best model saved with Val Loss: 0.970081 +2025-06-28 14:43:28,758 - INFO - type of total_loss: +2025-06-28 14:43:28,767 - INFO - total_loss value: 11.644763588905334 +2025-06-28 14:43:28,767 - INFO - length of train_dataloader: 10 +2025-06-28 14:43:32,167 - INFO - Epoch 50/50 - Train Loss: 1.164476, Val Loss: 0.927823 +2025-06-28 14:43:34,035 - 
INFO - type of total_loss: +2025-06-28 14:43:34,040 - INFO - total_loss value: 11.638634324073792 +2025-06-28 14:43:34,040 - INFO - length of train_dataloader: 10 +2025-06-28 14:43:37,555 - INFO - Epoch 19/50 - Train Loss: 1.163863, Val Loss: 0.967578 +2025-06-28 14:43:37,580 - INFO - New best model saved with Val Loss: 0.967578 +2025-06-28 14:43:44,613 - INFO - type of total_loss: +2025-06-28 14:43:44,613 - INFO - total_loss value: 11.648188829421997 +2025-06-28 14:43:44,613 - INFO - length of train_dataloader: 10 +2025-06-28 14:43:48,618 - INFO - Epoch 20/50 - Train Loss: 1.164819, Val Loss: 0.955320 +2025-06-28 14:43:48,643 - INFO - New best model saved with Val Loss: 0.955320 +2025-06-28 14:43:58,388 - INFO - type of total_loss: +2025-06-28 14:43:58,388 - INFO - total_loss value: 11.66547679901123 +2025-06-28 14:43:58,388 - INFO - length of train_dataloader: 10 +2025-06-28 14:44:02,054 - INFO - Epoch 21/50 - Train Loss: 1.166548, Val Loss: 0.952791 +2025-06-28 14:44:02,088 - INFO - New best model saved with Val Loss: 0.952791 +2025-06-28 14:44:08,959 - INFO - type of total_loss: +2025-06-28 14:44:08,960 - INFO - total_loss value: 11.666450500488281 +2025-06-28 14:44:08,960 - INFO - length of train_dataloader: 10 +2025-06-28 14:44:12,752 - INFO - Epoch 22/50 - Train Loss: 1.166645, Val Loss: 0.953860 +2025-06-28 14:44:19,852 - INFO - type of total_loss: +2025-06-28 14:44:19,852 - INFO - total_loss value: 11.671009302139282 +2025-06-28 14:44:19,852 - INFO - length of train_dataloader: 10 +2025-06-28 14:44:23,377 - INFO - Epoch 23/50 - Train Loss: 1.167101, Val Loss: 0.951229 +2025-06-28 14:44:23,401 - INFO - New best model saved with Val Loss: 0.951229 +2025-06-28 14:44:30,449 - INFO - type of total_loss: +2025-06-28 14:44:30,449 - INFO - total_loss value: 11.672417163848877 +2025-06-28 14:44:30,449 - INFO - length of train_dataloader: 10 +2025-06-28 14:44:34,011 - INFO - Epoch 24/50 - Train Loss: 1.167242, Val Loss: 0.957333 +2025-06-28 14:44:40,926 - INFO - 
type of total_loss: +2025-06-28 14:44:40,926 - INFO - total_loss value: 11.652331113815308 +2025-06-28 14:44:40,926 - INFO - length of train_dataloader: 10 +2025-06-28 14:44:44,462 - INFO - Epoch 25/50 - Train Loss: 1.165233, Val Loss: 0.958300 +2025-06-28 14:44:51,510 - INFO - type of total_loss: +2025-06-28 14:44:51,510 - INFO - total_loss value: 11.66399097442627 +2025-06-28 14:44:51,510 - INFO - length of train_dataloader: 10 +2025-06-28 14:44:55,034 - INFO - Epoch 26/50 - Train Loss: 1.166399, Val Loss: 0.944509 +2025-06-28 14:44:55,058 - INFO - New best model saved with Val Loss: 0.944509 +2025-06-28 14:45:01,907 - INFO - type of total_loss: +2025-06-28 14:45:01,907 - INFO - total_loss value: 11.5212641954422 +2025-06-28 14:45:01,907 - INFO - length of train_dataloader: 10 +2025-06-28 14:45:05,431 - INFO - Epoch 27/50 - Train Loss: 1.152126, Val Loss: 0.949081 +2025-06-28 14:45:12,353 - INFO - type of total_loss: +2025-06-28 14:45:12,353 - INFO - total_loss value: 11.659118175506592 +2025-06-28 14:45:12,353 - INFO - length of train_dataloader: 10 +2025-06-28 14:45:16,062 - INFO - Epoch 28/50 - Train Loss: 1.165912, Val Loss: 0.953573 +2025-06-28 14:45:23,171 - INFO - type of total_loss: +2025-06-28 14:45:23,171 - INFO - total_loss value: 11.656983256340027 +2025-06-28 14:45:23,171 - INFO - length of train_dataloader: 10 +2025-06-28 14:45:26,851 - INFO - Epoch 29/50 - Train Loss: 1.165698, Val Loss: 0.954406 +2025-06-28 14:45:33,679 - INFO - type of total_loss: +2025-06-28 14:45:33,679 - INFO - total_loss value: 11.692601919174194 +2025-06-28 14:45:33,679 - INFO - length of train_dataloader: 10 +2025-06-28 14:45:37,192 - INFO - Epoch 30/50 - Train Loss: 1.169260, Val Loss: 0.953179 +2025-06-28 14:45:44,244 - INFO - type of total_loss: +2025-06-28 14:45:44,244 - INFO - total_loss value: 11.533832788467407 +2025-06-28 14:45:44,244 - INFO - length of train_dataloader: 10 +2025-06-28 14:45:47,943 - INFO - Epoch 31/50 - Train Loss: 1.153383, Val Loss: 0.956231 
+2025-06-28 14:45:54,821 - INFO - type of total_loss: +2025-06-28 14:45:54,821 - INFO - total_loss value: 11.666002988815308 +2025-06-28 14:45:54,821 - INFO - length of train_dataloader: 10 +2025-06-28 14:45:58,474 - INFO - Epoch 32/50 - Train Loss: 1.166600, Val Loss: 0.951611 +2025-06-28 14:46:05,287 - INFO - type of total_loss: +2025-06-28 14:46:05,288 - INFO - total_loss value: 11.666587829589844 +2025-06-28 14:46:05,288 - INFO - length of train_dataloader: 10 +2025-06-28 14:46:08,847 - INFO - Epoch 33/50 - Train Loss: 1.166659, Val Loss: 0.942014 +2025-06-28 14:46:08,873 - INFO - New best model saved with Val Loss: 0.942014 +2025-06-28 14:46:15,794 - INFO - type of total_loss: +2025-06-28 14:46:15,794 - INFO - total_loss value: 11.67379629611969 +2025-06-28 14:46:15,795 - INFO - length of train_dataloader: 10 +2025-06-28 14:46:19,286 - INFO - Epoch 34/50 - Train Loss: 1.167380, Val Loss: 0.929979 +2025-06-28 14:46:19,310 - INFO - New best model saved with Val Loss: 0.929979 +2025-06-28 14:46:26,183 - INFO - type of total_loss: +2025-06-28 14:46:26,184 - INFO - total_loss value: 11.541240930557251 +2025-06-28 14:46:26,184 - INFO - length of train_dataloader: 10 +2025-06-28 14:46:29,644 - INFO - Epoch 35/50 - Train Loss: 1.154124, Val Loss: 0.935797 +2025-06-28 14:46:36,388 - INFO - type of total_loss: +2025-06-28 14:46:36,388 - INFO - total_loss value: 11.633399605751038 +2025-06-28 14:46:36,388 - INFO - length of train_dataloader: 10 +2025-06-28 14:46:40,093 - INFO - Epoch 36/50 - Train Loss: 1.163340, Val Loss: 0.937645 +2025-06-28 14:46:46,984 - INFO - type of total_loss: +2025-06-28 14:46:46,984 - INFO - total_loss value: 11.65716278553009 +2025-06-28 14:46:46,984 - INFO - length of train_dataloader: 10 +2025-06-28 14:46:50,704 - INFO - Epoch 37/50 - Train Loss: 1.165716, Val Loss: 0.934001 +2025-06-28 14:46:57,548 - INFO - type of total_loss: +2025-06-28 14:46:57,548 - INFO - total_loss value: 11.701975703239441 +2025-06-28 14:46:57,549 - INFO - length of 
train_dataloader: 10 +2025-06-28 14:47:01,279 - INFO - Epoch 38/50 - Train Loss: 1.170198, Val Loss: 0.939264 +2025-06-28 14:47:08,077 - INFO - type of total_loss: +2025-06-28 14:47:08,077 - INFO - total_loss value: 11.589049220085144 +2025-06-28 14:47:08,077 - INFO - length of train_dataloader: 10 +2025-06-28 14:47:12,026 - INFO - Epoch 39/50 - Train Loss: 1.158905, Val Loss: 0.932368 +2025-06-28 14:47:18,851 - INFO - type of total_loss: +2025-06-28 14:47:18,851 - INFO - total_loss value: 11.677146792411804 +2025-06-28 14:47:18,851 - INFO - length of train_dataloader: 10 +2025-06-28 14:47:24,188 - INFO - Epoch 40/50 - Train Loss: 1.167715, Val Loss: 0.936677 +2025-06-28 14:47:31,213 - INFO - type of total_loss: +2025-06-28 14:47:31,213 - INFO - total_loss value: 11.620135068893433 +2025-06-28 14:47:31,213 - INFO - length of train_dataloader: 10 +2025-06-28 14:47:34,956 - INFO - Epoch 41/50 - Train Loss: 1.162014, Val Loss: 0.933867 +2025-06-28 14:47:41,809 - INFO - type of total_loss: +2025-06-28 14:47:41,809 - INFO - total_loss value: 11.648094654083252 +2025-06-28 14:47:41,809 - INFO - length of train_dataloader: 10 +2025-06-28 14:47:45,384 - INFO - Epoch 42/50 - Train Loss: 1.164809, Val Loss: 0.930149 +2025-06-28 14:47:52,231 - INFO - type of total_loss: +2025-06-28 14:47:52,232 - INFO - total_loss value: 11.515152335166931 +2025-06-28 14:47:52,232 - INFO - length of train_dataloader: 10 +2025-06-28 14:47:55,943 - INFO - Epoch 43/50 - Train Loss: 1.151515, Val Loss: 0.921296 +2025-06-28 14:47:55,968 - INFO - New best model saved with Val Loss: 0.921296 +2025-06-28 14:48:02,956 - INFO - type of total_loss: +2025-06-28 14:48:02,956 - INFO - total_loss value: 11.655946016311646 +2025-06-28 14:48:02,956 - INFO - length of train_dataloader: 10 +2025-06-28 14:48:06,420 - INFO - Epoch 44/50 - Train Loss: 1.165595, Val Loss: 0.919199 +2025-06-28 14:48:06,445 - INFO - New best model saved with Val Loss: 0.919199 +2025-06-28 14:48:13,318 - INFO - type of total_loss: 
+2025-06-28 14:48:13,318 - INFO - total_loss value: 11.615602850914001 +2025-06-28 14:48:13,318 - INFO - length of train_dataloader: 10 +2025-06-28 14:48:16,841 - INFO - Epoch 45/50 - Train Loss: 1.161560, Val Loss: 0.922296 +2025-06-28 14:48:23,944 - INFO - type of total_loss: +2025-06-28 14:48:23,944 - INFO - total_loss value: 11.579261660575867 +2025-06-28 14:48:23,944 - INFO - length of train_dataloader: 10 +2025-06-28 14:48:27,406 - INFO - Epoch 46/50 - Train Loss: 1.157926, Val Loss: 0.921191 +2025-06-28 14:48:34,484 - INFO - type of total_loss: +2025-06-28 14:48:34,484 - INFO - total_loss value: 11.64852499961853 +2025-06-28 14:48:34,484 - INFO - length of train_dataloader: 10 +2025-06-28 14:48:38,028 - INFO - Epoch 47/50 - Train Loss: 1.164852, Val Loss: 0.932208 +2025-06-28 14:48:45,180 - INFO - type of total_loss: +2025-06-28 14:48:45,180 - INFO - total_loss value: 11.659417510032654 +2025-06-28 14:48:45,180 - INFO - length of train_dataloader: 10 +2025-06-28 14:48:48,711 - INFO - Epoch 48/50 - Train Loss: 1.165942, Val Loss: 0.941657 +2025-06-28 14:48:55,617 - INFO - type of total_loss: +2025-06-28 14:48:55,617 - INFO - total_loss value: 11.51139783859253 +2025-06-28 14:48:55,617 - INFO - length of train_dataloader: 10 +2025-06-28 14:48:59,332 - INFO - Epoch 49/50 - Train Loss: 1.151140, Val Loss: 0.926594 +2025-06-28 14:49:06,162 - INFO - type of total_loss: +2025-06-28 14:49:06,163 - INFO - total_loss value: 11.644763588905334 +2025-06-28 14:49:06,163 - INFO - length of train_dataloader: 10 +2025-06-28 14:49:09,682 - INFO - Epoch 50/50 - Train Loss: 1.164476, Val Loss: 0.927823 +2025-06-28 15:29:34,654 - INFO - args.exp_name : Train_Test +2025-06-28 15:29:34,656 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, 
epochs=10, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 15:29:34,656 - INFO - Starting training with 1 GPUs +2025-06-28 15:29:39,467 - INFO - Total trainable parameters: 1437705 +2025-06-28 15:29:39,543 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 15:29:39,547 - INFO - Staring training for 10 epochs +2025-06-28 15:29:57,103 - INFO - Epoch 1/10 - Train Loss: 1.253324, Val Loss: 1.131323 +2025-06-28 15:29:57,140 - INFO - New best model saved with Val Loss: 1.131323 +2025-06-28 15:30:14,067 - INFO - Epoch 2/10 - Train Loss: 1.188332, Val Loss: 1.119962 +2025-06-28 15:30:14,088 - INFO - New best model saved with Val Loss: 1.119962 +2025-06-28 15:53:07,367 - INFO - args.exp_name : Train_Test +2025-06-28 15:53:07,368 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 15:53:07,368 - INFO - Starting training with 1 GPUs +2025-06-28 15:53:13,012 - INFO - Total trainable parameters: 1437705 +2025-06-28 15:53:13,074 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 15:53:13,079 - INFO - Staring training for 12 epochs +2025-06-28 15:53:31,298 - INFO - Epoch 1/12 - Train Loss: 1.253324, Val Loss: 1.131323 +2025-06-28 15:53:31,318 - INFO - New best model saved with Val Loss: 1.131323 +2025-06-28 15:53:43,449 - INFO - Epoch 2/12 - Train Loss: 1.188332, Val Loss: 1.119962 +2025-06-28 15:53:43,466 - INFO - New best model saved with Val Loss: 1.119962 +2025-06-28 15:54:03,278 - INFO - Epoch 3/12 - Train Loss: 1.182359, Val Loss: 1.114902 +2025-06-28 15:54:03,300 - 
INFO - New best model saved with Val Loss: 1.114902 +2025-06-28 15:54:15,478 - INFO - Epoch 4/12 - Train Loss: 1.184324, Val Loss: 1.109728 +2025-06-28 15:54:15,494 - INFO - New best model saved with Val Loss: 1.109728 +2025-06-28 15:54:27,598 - INFO - Epoch 5/12 - Train Loss: 1.177681, Val Loss: 1.104723 +2025-06-28 15:54:27,613 - INFO - New best model saved with Val Loss: 1.104723 +2025-06-28 15:54:43,449 - INFO - Epoch 6/12 - Train Loss: 1.175024, Val Loss: 1.096504 +2025-06-28 15:54:43,481 - INFO - New best model saved with Val Loss: 1.096504 +2025-06-28 15:54:55,691 - INFO - Epoch 7/12 - Train Loss: 1.177505, Val Loss: 1.082989 +2025-06-28 15:54:55,716 - INFO - New best model saved with Val Loss: 1.082989 +2025-06-28 15:55:08,434 - INFO - Epoch 8/12 - Train Loss: 1.175777, Val Loss: 1.058120 +2025-06-28 15:55:08,452 - INFO - New best model saved with Val Loss: 1.058120 +2025-06-28 15:55:19,027 - INFO - args.exp_name : Train_Test +2025-06-28 15:55:19,030 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 15:55:19,030 - INFO - Starting training with 1 GPUs +2025-06-28 15:55:21,271 - INFO - Total trainable parameters: 1437705 +2025-06-28 15:55:21,341 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 15:55:21,342 - INFO - Staring training for 12 epochs +2025-06-28 15:55:25,215 - INFO - Epoch 9/12 - Train Loss: 1.170325, Val Loss: 1.028193 +2025-06-28 15:55:25,241 - INFO - New best model saved with Val Loss: 1.028193 +2025-06-28 15:55:33,807 - INFO - Epoch 1/12 - Train Loss: 1.253324, Val Loss: 1.131323 +2025-06-28 15:55:33,839 - INFO - New best model saved with Val 
Loss: 1.131323 +2025-06-28 15:55:44,147 - INFO - Epoch 2/12 - Train Loss: 1.188332, Val Loss: 1.119962 +2025-06-28 15:55:44,172 - INFO - New best model saved with Val Loss: 1.119962 +2025-06-28 15:55:54,593 - INFO - Epoch 3/12 - Train Loss: 1.182359, Val Loss: 1.114902 +2025-06-28 15:55:54,617 - INFO - New best model saved with Val Loss: 1.114902 +2025-06-28 15:56:04,812 - INFO - Epoch 4/12 - Train Loss: 1.184324, Val Loss: 1.109728 +2025-06-28 15:56:04,836 - INFO - New best model saved with Val Loss: 1.109728 +2025-06-28 15:56:15,217 - INFO - Epoch 5/12 - Train Loss: 1.177681, Val Loss: 1.104723 +2025-06-28 15:56:15,242 - INFO - New best model saved with Val Loss: 1.104723 +2025-06-28 15:56:25,570 - INFO - Epoch 6/12 - Train Loss: 1.175024, Val Loss: 1.096504 +2025-06-28 15:56:25,594 - INFO - New best model saved with Val Loss: 1.096504 +2025-06-28 15:56:35,848 - INFO - Epoch 7/12 - Train Loss: 1.177505, Val Loss: 1.082989 +2025-06-28 15:56:35,873 - INFO - New best model saved with Val Loss: 1.082989 +2025-06-28 15:56:46,181 - INFO - Epoch 8/12 - Train Loss: 1.175777, Val Loss: 1.058120 +2025-06-28 15:56:46,205 - INFO - New best model saved with Val Loss: 1.058120 +2025-06-28 15:56:56,787 - INFO - Epoch 9/12 - Train Loss: 1.170325, Val Loss: 1.028193 +2025-06-28 15:56:56,812 - INFO - New best model saved with Val Loss: 1.028193 +2025-06-28 15:57:07,466 - INFO - Epoch 10/12 - Train Loss: 1.160947, Val Loss: 1.010467 +2025-06-28 15:57:07,490 - INFO - New best model saved with Val Loss: 1.010467 +2025-06-28 16:06:12,487 - INFO - args.exp_name : Train_Test +2025-06-28 16:06:12,491 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, 
output_channels=1) +2025-06-28 16:06:12,491 - INFO - Starting training with 1 GPUs +2025-06-28 16:06:19,858 - INFO - Total trainable parameters: 1437705 +2025-06-28 16:06:19,920 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 16:06:19,925 - INFO - Staring training for 12 epochs +2025-06-28 16:06:42,263 - INFO - Epoch 1/12 - Train Loss: 1.253324, Val Loss: 1.131323 +2025-06-28 16:06:42,332 - INFO - New best model saved with Val Loss: 1.131323 +2025-06-28 16:07:03,464 - INFO - Epoch 2/12 - Train Loss: 1.188332, Val Loss: 1.119962 +2025-06-28 16:07:03,484 - INFO - New best model saved with Val Loss: 1.119962 +2025-06-28 16:07:19,778 - INFO - Epoch 3/12 - Train Loss: 1.182359, Val Loss: 1.114902 +2025-06-28 16:07:19,795 - INFO - New best model saved with Val Loss: 1.114902 +2025-06-28 16:07:32,749 - INFO - Epoch 4/12 - Train Loss: 1.184324, Val Loss: 1.109728 +2025-06-28 16:07:32,765 - INFO - New best model saved with Val Loss: 1.109728 +2025-06-28 16:07:45,024 - INFO - Epoch 5/12 - Train Loss: 1.177681, Val Loss: 1.104723 +2025-06-28 16:07:45,041 - INFO - New best model saved with Val Loss: 1.104723 +2025-06-28 16:07:57,786 - INFO - Epoch 6/12 - Train Loss: 1.175024, Val Loss: 1.096504 +2025-06-28 16:07:57,804 - INFO - New best model saved with Val Loss: 1.096504 +2025-06-28 16:08:11,182 - INFO - Epoch 7/12 - Train Loss: 1.177505, Val Loss: 1.082989 +2025-06-28 16:08:11,203 - INFO - New best model saved with Val Loss: 1.082989 +2025-06-28 16:08:23,350 - INFO - Epoch 8/12 - Train Loss: 1.175777, Val Loss: 1.058120 +2025-06-28 16:08:23,367 - INFO - New best model saved with Val Loss: 1.058120 +2025-06-28 16:08:38,221 - INFO - Epoch 9/12 - Train Loss: 1.170325, Val Loss: 1.028193 +2025-06-28 16:08:38,240 - INFO - New best model saved with Val Loss: 1.028193 +2025-06-28 16:08:52,473 - INFO - Epoch 10/12 - Train Loss: 1.160947, Val Loss: 1.010467 +2025-06-28 16:08:52,492 - INFO - New best model saved with Val Loss: 1.010467 
+2025-06-28 16:12:47,693 - INFO - args.exp_name : Train_Test +2025-06-28 16:12:47,695 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 16:12:47,695 - INFO - Starting training with 1 GPUs +2025-06-28 16:12:54,493 - INFO - Total trainable parameters: 1437705 +2025-06-28 16:12:54,551 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 16:12:54,558 - INFO - Staring training for 12 epochs +2025-06-28 16:13:19,223 - INFO - Epoch 1/12 - Train Loss: 1.253324, Val Loss: 1.131323 +2025-06-28 16:13:19,252 - INFO - New best model saved with Val Loss: 1.131323 +2025-06-28 16:13:34,872 - INFO - Epoch 2/12 - Train Loss: 1.188332, Val Loss: 1.119962 +2025-06-28 16:13:34,896 - INFO - New best model saved with Val Loss: 1.119962 +2025-06-28 16:13:48,952 - INFO - Epoch 3/12 - Train Loss: 1.182359, Val Loss: 1.114902 +2025-06-28 16:13:48,980 - INFO - New best model saved with Val Loss: 1.114902 +2025-06-28 16:14:06,875 - INFO - Epoch 4/12 - Train Loss: 1.184324, Val Loss: 1.109728 +2025-06-28 16:14:06,898 - INFO - New best model saved with Val Loss: 1.109728 +2025-06-28 16:14:22,948 - INFO - Epoch 5/12 - Train Loss: 1.177681, Val Loss: 1.104723 +2025-06-28 16:14:22,969 - INFO - New best model saved with Val Loss: 1.104723 +2025-06-28 16:14:38,304 - INFO - Epoch 6/12 - Train Loss: 1.175024, Val Loss: 1.096504 +2025-06-28 16:14:38,329 - INFO - New best model saved with Val Loss: 1.096504 +2025-06-28 16:14:52,969 - INFO - Epoch 7/12 - Train Loss: 1.177505, Val Loss: 1.082989 +2025-06-28 16:14:52,993 - INFO - New best model saved with Val Loss: 1.082989 +2025-06-28 16:15:08,834 - INFO - 
Epoch 8/12 - Train Loss: 1.175777, Val Loss: 1.058120 +2025-06-28 16:15:08,856 - INFO - New best model saved with Val Loss: 1.058120 +2025-06-28 16:15:23,602 - INFO - Epoch 9/12 - Train Loss: 1.170325, Val Loss: 1.028193 +2025-06-28 16:15:23,624 - INFO - New best model saved with Val Loss: 1.028193 +2025-06-28 16:15:37,812 - INFO - Epoch 10/12 - Train Loss: 1.160947, Val Loss: 1.010467 +2025-06-28 16:15:37,830 - INFO - New best model saved with Val Loss: 1.010467 +2025-06-28 16:28:22,451 - INFO - args.exp_name : Train_Test +2025-06-28 16:28:22,461 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 16:28:22,461 - INFO - Starting training with 1 GPUs +2025-06-28 16:28:27,748 - INFO - Total trainable parameters: 1437705 +2025-06-28 16:28:27,821 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 16:28:27,828 - INFO - Staring training for 12 epochs +2025-06-28 16:28:45,439 - INFO - Epoch 1/12 - Train Loss: 1.253324, Val Loss: 1.131323 +2025-06-28 16:28:45,459 - INFO - New best model saved with Val Loss: 1.131323 +2025-06-28 16:28:57,528 - INFO - Epoch 2/12 - Train Loss: 1.188332, Val Loss: 1.119962 +2025-06-28 16:28:57,548 - INFO - New best model saved with Val Loss: 1.119962 +2025-06-28 16:29:15,218 - INFO - Epoch 3/12 - Train Loss: 1.182359, Val Loss: 1.114902 +2025-06-28 16:29:15,239 - INFO - New best model saved with Val Loss: 1.114902 +2025-06-28 16:29:28,435 - INFO - Epoch 4/12 - Train Loss: 1.184324, Val Loss: 1.109728 +2025-06-28 16:29:28,454 - INFO - New best model saved with Val Loss: 1.109728 +2025-06-28 16:29:53,630 - INFO - Epoch 5/12 - Train Loss: 1.177681, 
Val Loss: 1.104723 +2025-06-28 16:29:53,646 - INFO - New best model saved with Val Loss: 1.104723 +2025-06-28 16:30:08,471 - INFO - Epoch 6/12 - Train Loss: 1.175024, Val Loss: 1.096504 +2025-06-28 16:30:08,490 - INFO - New best model saved with Val Loss: 1.096504 +2025-06-28 16:30:21,706 - INFO - Epoch 7/12 - Train Loss: 1.177505, Val Loss: 1.082989 +2025-06-28 16:30:21,725 - INFO - New best model saved with Val Loss: 1.082989 +2025-06-28 16:30:53,740 - INFO - Epoch 8/12 - Train Loss: 1.175777, Val Loss: 1.058120 +2025-06-28 16:30:53,757 - INFO - New best model saved with Val Loss: 1.058120 +2025-06-28 16:31:09,345 - INFO - Epoch 9/12 - Train Loss: 1.170325, Val Loss: 1.028193 +2025-06-28 16:31:09,363 - INFO - New best model saved with Val Loss: 1.028193 +2025-06-28 16:31:22,986 - INFO - Epoch 10/12 - Train Loss: 1.160947, Val Loss: 1.010467 +2025-06-28 16:31:23,006 - INFO - New best model saved with Val Loss: 1.010467 +2025-06-28 16:31:36,627 - INFO - Epoch 11/12 - Train Loss: 1.173847, Val Loss: 0.980210 +2025-06-28 16:31:36,647 - INFO - New best model saved with Val Loss: 0.980210 +2025-06-28 16:31:48,878 - INFO - Epoch 12/12 - Train Loss: 1.172093, Val Loss: 1.007287 +2025-06-28 16:31:49,003 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-06-28 16:36:21,974 - INFO - args.exp_name : Train_Test +2025-06-28 16:36:21,983 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 16:36:21,984 - INFO - Starting training with 1 GPUs +2025-06-28 16:36:27,948 - INFO - Total trainable parameters: 1437705 +2025-06-28 16:36:28,010 - INFO - Data loaded: 10 training batches, 2 validation batches, 
3 test batches +2025-06-28 16:36:28,014 - INFO - Staring training for 12 epochs +2025-06-28 16:37:01,024 - INFO - Epoch 1/12 - Train Loss: 1.253324, Val Loss: 1.131323 +2025-06-28 16:37:01,086 - INFO - New best model saved with Val Loss: 1.131323 +2025-06-28 16:37:16,122 - INFO - Epoch 2/12 - Train Loss: 1.188332, Val Loss: 1.119962 +2025-06-28 16:37:16,154 - INFO - New best model saved with Val Loss: 1.119962 +2025-06-28 16:37:27,172 - INFO - Epoch 3/12 - Train Loss: 1.182359, Val Loss: 1.114902 +2025-06-28 16:37:27,188 - INFO - New best model saved with Val Loss: 1.114902 +2025-06-28 16:37:38,405 - INFO - Epoch 4/12 - Train Loss: 1.184324, Val Loss: 1.109728 +2025-06-28 16:37:38,421 - INFO - New best model saved with Val Loss: 1.109728 +2025-06-28 16:37:49,869 - INFO - Epoch 5/12 - Train Loss: 1.177681, Val Loss: 1.104723 +2025-06-28 16:37:49,886 - INFO - New best model saved with Val Loss: 1.104723 +2025-06-28 16:38:01,142 - INFO - Epoch 6/12 - Train Loss: 1.175024, Val Loss: 1.096504 +2025-06-28 16:38:01,159 - INFO - New best model saved with Val Loss: 1.096504 +2025-06-28 16:38:11,445 - INFO - Epoch 7/12 - Train Loss: 1.177505, Val Loss: 1.082989 +2025-06-28 16:38:11,462 - INFO - New best model saved with Val Loss: 1.082989 +2025-06-28 16:38:23,944 - INFO - Epoch 8/12 - Train Loss: 1.175777, Val Loss: 1.058120 +2025-06-28 16:38:23,963 - INFO - New best model saved with Val Loss: 1.058120 +2025-06-28 16:38:35,278 - INFO - Epoch 9/12 - Train Loss: 1.170325, Val Loss: 1.028193 +2025-06-28 16:38:35,294 - INFO - New best model saved with Val Loss: 1.028193 +2025-06-28 16:38:45,989 - INFO - Epoch 10/12 - Train Loss: 1.160947, Val Loss: 1.010467 +2025-06-28 16:38:46,003 - INFO - New best model saved with Val Loss: 1.010467 +2025-06-28 16:38:56,776 - INFO - Epoch 11/12 - Train Loss: 1.173847, Val Loss: 0.980210 +2025-06-28 16:38:56,794 - INFO - New best model saved with Val Loss: 0.980210 +2025-06-28 16:39:09,038 - INFO - Epoch 12/12 - Train Loss: 1.172093, Val Loss: 
1.007287 +2025-06-28 16:39:09,168 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-06-28 16:44:58,608 - INFO - args.exp_name : Train_Test +2025-06-28 16:44:58,610 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 16:44:58,610 - INFO - Starting training with 1 GPUs +2025-06-28 16:45:03,908 - INFO - Total trainable parameters: 1437705 +2025-06-28 16:45:03,962 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 16:45:03,968 - INFO - Staring training for 12 epochs +2025-06-28 16:46:09,320 - INFO - Epoch 1/12 - Train Loss: 1.253324, Val Loss: 1.131323 +2025-06-28 16:46:09,362 - INFO - New best model saved with Val Loss: 1.131323 +2025-06-28 16:46:24,542 - INFO - Epoch 2/12 - Train Loss: 1.188332, Val Loss: 1.119962 +2025-06-28 16:46:24,560 - INFO - New best model saved with Val Loss: 1.119962 +2025-06-28 16:46:35,885 - INFO - Epoch 3/12 - Train Loss: 1.182359, Val Loss: 1.114902 +2025-06-28 16:46:35,902 - INFO - New best model saved with Val Loss: 1.114902 +2025-06-28 16:46:48,232 - INFO - Epoch 4/12 - Train Loss: 1.184324, Val Loss: 1.109728 +2025-06-28 16:46:48,262 - INFO - New best model saved with Val Loss: 1.109728 +2025-06-28 16:47:01,475 - INFO - Epoch 5/12 - Train Loss: 1.177681, Val Loss: 1.104723 +2025-06-28 16:47:01,493 - INFO - New best model saved with Val Loss: 1.104723 +2025-06-28 16:47:19,029 - INFO - Epoch 6/12 - Train Loss: 1.175024, Val Loss: 1.096504 +2025-06-28 16:47:19,047 - INFO - New best model saved with Val Loss: 1.096504 +2025-06-28 16:47:31,678 - INFO - Epoch 7/12 - Train Loss: 1.177505, Val Loss: 1.082989 +2025-06-28 
16:47:31,696 - INFO - New best model saved with Val Loss: 1.082989 +2025-06-28 16:47:44,080 - INFO - Epoch 8/12 - Train Loss: 1.175777, Val Loss: 1.058120 +2025-06-28 16:47:44,099 - INFO - New best model saved with Val Loss: 1.058120 +2025-06-28 16:47:55,991 - INFO - Epoch 9/12 - Train Loss: 1.170325, Val Loss: 1.028193 +2025-06-28 16:47:56,008 - INFO - New best model saved with Val Loss: 1.028193 +2025-06-28 16:48:08,047 - INFO - Epoch 10/12 - Train Loss: 1.160947, Val Loss: 1.010467 +2025-06-28 16:48:08,065 - INFO - New best model saved with Val Loss: 1.010467 +2025-06-28 16:48:21,738 - INFO - Epoch 11/12 - Train Loss: 1.173847, Val Loss: 0.980210 +2025-06-28 16:48:21,762 - INFO - New best model saved with Val Loss: 0.980210 +2025-06-28 16:48:35,348 - INFO - Epoch 12/12 - Train Loss: 1.172093, Val Loss: 1.007287 +2025-06-28 16:48:35,481 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-06-28 16:48:35,493 - INFO - Testing the final model +2025-06-28 16:49:38,705 - INFO - args.exp_name : Train_Test +2025-06-28 16:49:38,714 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 16:49:38,714 - INFO - Starting training with 1 GPUs +2025-06-28 16:49:43,284 - INFO - Total trainable parameters: 1437705 +2025-06-28 16:49:43,340 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 16:49:43,345 - INFO - Staring training for 12 epochs +2025-06-28 16:50:00,842 - INFO - Epoch 1/12 - Train Loss: 1.253324, Val Loss: 1.131323 +2025-06-28 16:50:00,887 - INFO - New best model saved with Val Loss: 1.131323 +2025-06-28 16:50:13,527 - INFO - Epoch 2/12 - Train Loss: 
1.188332, Val Loss: 1.119962 +2025-06-28 16:50:13,545 - INFO - New best model saved with Val Loss: 1.119962 +2025-06-28 16:50:26,179 - INFO - Epoch 3/12 - Train Loss: 1.182359, Val Loss: 1.114902 +2025-06-28 16:50:26,197 - INFO - New best model saved with Val Loss: 1.114902 +2025-06-28 16:50:38,916 - INFO - Epoch 4/12 - Train Loss: 1.184324, Val Loss: 1.109728 +2025-06-28 16:50:38,935 - INFO - New best model saved with Val Loss: 1.109728 +2025-06-28 16:50:51,177 - INFO - Epoch 5/12 - Train Loss: 1.177681, Val Loss: 1.104723 +2025-06-28 16:50:51,194 - INFO - New best model saved with Val Loss: 1.104723 +2025-06-28 16:51:03,522 - INFO - Epoch 6/12 - Train Loss: 1.175024, Val Loss: 1.096504 +2025-06-28 16:51:03,538 - INFO - New best model saved with Val Loss: 1.096504 +2025-06-28 16:51:14,828 - INFO - Epoch 7/12 - Train Loss: 1.177505, Val Loss: 1.082989 +2025-06-28 16:51:14,844 - INFO - New best model saved with Val Loss: 1.082989 +2025-06-28 16:51:26,940 - INFO - Epoch 8/12 - Train Loss: 1.175777, Val Loss: 1.058120 +2025-06-28 16:51:26,957 - INFO - New best model saved with Val Loss: 1.058120 +2025-06-28 16:51:39,875 - INFO - Epoch 9/12 - Train Loss: 1.170325, Val Loss: 1.028193 +2025-06-28 16:51:39,891 - INFO - New best model saved with Val Loss: 1.028193 +2025-06-28 16:51:52,764 - INFO - Epoch 10/12 - Train Loss: 1.160947, Val Loss: 1.010467 +2025-06-28 16:51:52,783 - INFO - New best model saved with Val Loss: 1.010467 +2025-06-28 16:52:04,711 - INFO - Epoch 11/12 - Train Loss: 1.173847, Val Loss: 0.980210 +2025-06-28 16:52:04,729 - INFO - New best model saved with Val Loss: 0.980210 +2025-06-28 16:52:17,143 - INFO - Epoch 12/12 - Train Loss: 1.172093, Val Loss: 1.007287 +2025-06-28 16:52:17,274 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-06-28 16:52:17,289 - INFO - Testing the final model +2025-06-28 17:40:57,583 - INFO - args.exp_name : Train_Test +2025-06-28 17:40:57,592 - INFO - Arguments: Namespace(exp_name='Train_Test', 
seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 17:40:57,593 - INFO - Starting training with 1 GPUs +2025-06-28 17:41:02,288 - INFO - Total trainable parameters: 1437705 +2025-06-28 17:41:02,349 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 17:41:02,353 - INFO - Staring training for 12 epochs +2025-06-28 17:41:20,569 - INFO - Epoch 1/12 - Train Loss: 1.253324, Val Loss: 1.140946 +2025-06-28 17:41:20,597 - INFO - New best model saved with Val Loss: 1.140946 +2025-06-28 17:41:33,602 - INFO - Epoch 2/12 - Train Loss: 1.188389, Val Loss: 1.113803 +2025-06-28 17:41:33,622 - INFO - New best model saved with Val Loss: 1.113803 +2025-06-28 17:41:45,120 - INFO - Epoch 3/12 - Train Loss: 1.182415, Val Loss: 1.121538 +2025-06-28 17:41:58,656 - INFO - Epoch 4/12 - Train Loss: 1.184420, Val Loss: 1.125069 +2025-06-28 17:42:11,109 - INFO - Epoch 5/12 - Train Loss: 1.177936, Val Loss: 1.114436 +2025-06-28 17:42:27,952 - INFO - Epoch 6/12 - Train Loss: 1.174998, Val Loss: 1.103332 +2025-06-28 17:42:27,982 - INFO - New best model saved with Val Loss: 1.103332 +2025-06-28 17:42:40,462 - INFO - Epoch 7/12 - Train Loss: 1.177706, Val Loss: 1.091069 +2025-06-28 17:42:40,485 - INFO - New best model saved with Val Loss: 1.091069 +2025-06-28 17:42:52,824 - INFO - Epoch 8/12 - Train Loss: 1.176191, Val Loss: 1.076332 +2025-06-28 17:42:52,849 - INFO - New best model saved with Val Loss: 1.076332 +2025-06-28 17:43:10,553 - INFO - Epoch 9/12 - Train Loss: 1.170609, Val Loss: 1.064585 +2025-06-28 17:43:10,570 - INFO - New best model saved with Val Loss: 1.064585 +2025-06-28 17:43:23,868 - INFO - Epoch 10/12 - Train Loss: 1.161243, Val 
Loss: 1.050977 +2025-06-28 17:43:23,898 - INFO - New best model saved with Val Loss: 1.050977 +2025-06-28 17:43:35,538 - INFO - Epoch 11/12 - Train Loss: 1.173800, Val Loss: 1.018686 +2025-06-28 17:43:35,567 - INFO - New best model saved with Val Loss: 1.018686 +2025-06-28 17:43:47,557 - INFO - Epoch 12/12 - Train Loss: 1.172042, Val Loss: 0.995451 +2025-06-28 17:43:47,574 - INFO - New best model saved with Val Loss: 0.995451 +2025-06-28 17:43:47,722 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-06-28 17:43:47,732 - INFO - Testing the final model +2025-06-28 17:43:50,700 - INFO - mse type: +2025-06-28 18:21:25,619 - INFO - args.exp_name : Train_Test +2025-06-28 18:21:25,622 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 18:21:25,622 - INFO - Starting training with 1 GPUs +2025-06-28 18:21:27,703 - INFO - Total trainable parameters: 1437705 +2025-06-28 18:21:27,761 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 18:21:27,762 - INFO - Staring training for 12 epochs +2025-06-28 18:21:39,806 - INFO - Epoch 1/12 - Train Loss: 1.253324, Val Loss: 1.140946 +2025-06-28 18:21:39,849 - INFO - New best model saved with Val Loss: 1.140946 +2025-06-28 18:21:50,336 - INFO - Epoch 2/12 - Train Loss: 1.188389, Val Loss: 1.113803 +2025-06-28 18:21:50,361 - INFO - New best model saved with Val Loss: 1.113803 +2025-06-28 18:22:01,754 - INFO - Epoch 3/12 - Train Loss: 1.182415, Val Loss: 1.121538 +2025-06-28 18:22:13,083 - INFO - Epoch 4/12 - Train Loss: 1.184420, Val Loss: 1.125069 +2025-06-28 18:22:23,809 - INFO - Epoch 5/12 - Train Loss: 1.177936, 
Val Loss: 1.114436 +2025-06-28 18:22:34,385 - INFO - Epoch 6/12 - Train Loss: 1.174998, Val Loss: 1.103332 +2025-06-28 18:22:34,412 - INFO - New best model saved with Val Loss: 1.103332 +2025-06-28 18:22:44,971 - INFO - Epoch 7/12 - Train Loss: 1.177706, Val Loss: 1.091069 +2025-06-28 18:22:44,996 - INFO - New best model saved with Val Loss: 1.091069 +2025-06-28 18:22:55,323 - INFO - Epoch 8/12 - Train Loss: 1.176191, Val Loss: 1.076332 +2025-06-28 18:22:55,348 - INFO - New best model saved with Val Loss: 1.076332 +2025-06-28 18:23:05,672 - INFO - Epoch 9/12 - Train Loss: 1.170609, Val Loss: 1.064585 +2025-06-28 18:23:05,696 - INFO - New best model saved with Val Loss: 1.064585 +2025-06-28 18:23:16,284 - INFO - Epoch 10/12 - Train Loss: 1.161243, Val Loss: 1.050977 +2025-06-28 18:23:16,309 - INFO - New best model saved with Val Loss: 1.050977 +2025-06-28 18:23:26,649 - INFO - Epoch 11/12 - Train Loss: 1.173800, Val Loss: 1.018686 +2025-06-28 18:23:26,675 - INFO - New best model saved with Val Loss: 1.018686 +2025-06-28 18:23:37,014 - INFO - Epoch 12/12 - Train Loss: 1.172042, Val Loss: 0.995451 +2025-06-28 18:23:37,038 - INFO - New best model saved with Val Loss: 0.995451 +2025-06-28 18:23:37,175 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-06-28 18:23:37,175 - INFO - Testing the final model +2025-06-28 18:23:40,000 - INFO - mse type: +2025-06-28 18:23:40,005 - INFO - mse shape: torch.Size([]) +2025-06-28 18:23:40,005 - INFO - mae type: +2025-06-28 18:23:40,005 - INFO - mae shape: torch.Size([]) +2025-06-28 18:23:40,041 - INFO - mse type: +2025-06-28 18:23:40,041 - INFO - mse shape: torch.Size([]) +2025-06-28 18:23:40,041 - INFO - mae type: +2025-06-28 18:23:40,041 - INFO - mae shape: torch.Size([]) +2025-06-28 18:23:40,275 - INFO - mse type: +2025-06-28 18:23:40,279 - INFO - mse shape: torch.Size([]) +2025-06-28 18:23:40,279 - INFO - mae type: +2025-06-28 18:23:40,288 - INFO - mae shape: torch.Size([]) +2025-06-28 18:30:15,743 - INFO 
- args.exp_name : Train_Test +2025-06-28 18:30:15,755 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 18:30:15,755 - INFO - Starting training with 1 GPUs +2025-06-28 18:30:22,593 - INFO - Total trainable parameters: 1437705 +2025-06-28 18:30:22,648 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 18:30:22,651 - INFO - Staring training for 12 epochs +2025-06-28 18:30:42,202 - INFO - Epoch 1/12 - Train Loss: 1.253324, Val Loss: 1.140946 +2025-06-28 18:30:42,231 - INFO - New best model saved with Val Loss: 1.140946 +2025-06-28 18:30:58,928 - INFO - Epoch 2/12 - Train Loss: 1.188389, Val Loss: 1.113803 +2025-06-28 18:30:58,949 - INFO - New best model saved with Val Loss: 1.113803 +2025-06-28 18:31:15,250 - INFO - Epoch 3/12 - Train Loss: 1.182415, Val Loss: 1.121538 +2025-06-28 18:31:31,814 - INFO - Epoch 4/12 - Train Loss: 1.184420, Val Loss: 1.125069 +2025-06-28 18:31:47,594 - INFO - Epoch 5/12 - Train Loss: 1.177936, Val Loss: 1.114436 +2025-06-28 18:32:03,738 - INFO - Epoch 6/12 - Train Loss: 1.174998, Val Loss: 1.103332 +2025-06-28 18:32:03,759 - INFO - New best model saved with Val Loss: 1.103332 +2025-06-28 18:32:20,505 - INFO - Epoch 7/12 - Train Loss: 1.177706, Val Loss: 1.091069 +2025-06-28 18:32:20,525 - INFO - New best model saved with Val Loss: 1.091069 +2025-06-28 18:34:09,706 - INFO - args.exp_name : Train_Test +2025-06-28 18:34:09,715 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', 
cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-28 18:34:09,715 - INFO - Starting training with 1 GPUs +2025-06-28 18:34:15,351 - INFO - Total trainable parameters: 1437705 +2025-06-28 18:34:15,413 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-28 18:34:15,419 - INFO - Staring training for 12 epochs +2025-06-28 18:34:34,140 - INFO - Epoch 1/12 - Train Loss: 1.253324, Val Loss: 1.140946 +2025-06-28 18:34:34,190 - INFO - New best model saved with Val Loss: 1.140946 +2025-06-28 18:34:48,816 - INFO - Epoch 2/12 - Train Loss: 1.188389, Val Loss: 1.113803 +2025-06-28 18:34:48,837 - INFO - New best model saved with Val Loss: 1.113803 +2025-06-28 18:35:03,056 - INFO - Epoch 3/12 - Train Loss: 1.182415, Val Loss: 1.121538 +2025-06-28 18:35:16,281 - INFO - Epoch 4/12 - Train Loss: 1.184420, Val Loss: 1.125069 +2025-06-28 18:35:31,027 - INFO - Epoch 5/12 - Train Loss: 1.177936, Val Loss: 1.114436 +2025-06-28 18:35:45,304 - INFO - Epoch 6/12 - Train Loss: 1.174998, Val Loss: 1.103332 +2025-06-28 18:35:45,339 - INFO - New best model saved with Val Loss: 1.103332 +2025-06-28 18:35:59,806 - INFO - Epoch 7/12 - Train Loss: 1.177706, Val Loss: 1.091069 +2025-06-28 18:35:59,823 - INFO - New best model saved with Val Loss: 1.091069 +2025-06-28 18:36:15,787 - INFO - Epoch 8/12 - Train Loss: 1.176191, Val Loss: 1.076332 +2025-06-28 18:36:15,807 - INFO - New best model saved with Val Loss: 1.076332 +2025-06-28 18:36:30,336 - INFO - Epoch 9/12 - Train Loss: 1.170609, Val Loss: 1.064585 +2025-06-28 18:36:30,356 - INFO - New best model saved with Val Loss: 1.064585 +2025-06-28 18:36:44,411 - INFO - Epoch 10/12 - Train Loss: 1.161243, Val Loss: 1.050977 +2025-06-28 18:36:44,430 - INFO - New best model saved with Val Loss: 1.050977 +2025-06-28 18:37:00,978 - INFO - Epoch 11/12 - Train 
Loss: 1.173800, Val Loss: 1.018686 +2025-06-28 18:37:01,001 - INFO - New best model saved with Val Loss: 1.018686 +2025-06-28 18:37:14,497 - INFO - Epoch 12/12 - Train Loss: 1.172042, Val Loss: 0.995451 +2025-06-28 18:37:14,518 - INFO - New best model saved with Val Loss: 0.995451 +2025-06-28 18:37:14,668 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-06-28 18:37:14,679 - INFO - Testing the final model +2025-06-28 18:37:19,202 - INFO - mse type: +2025-06-28 18:37:19,208 - INFO - mse value: 1.0333396196365356 +2025-06-28 18:37:19,208 - INFO - mse value_with_item: 1.0333396196365356 +2025-06-28 18:37:19,208 - INFO - mae type: +2025-06-28 18:37:19,209 - INFO - mae value: 0.6205043792724609 +2025-06-28 18:37:19,209 - INFO - mae value_with_item: 0.6205043792724609 +2025-06-28 18:37:19,246 - INFO - rel_l1 value: 0.9312472343444824 +2025-06-28 18:37:19,246 - INFO - rel_l1_with_item value: 0.9312472343444824 +2025-06-28 18:37:19,246 - INFO - rel_l2 value: 0.9395401477813721 +2025-06-28 18:37:19,246 - INFO - rel_l2_with_item value: 0.9395401477813721 +2025-06-28 18:37:19,252 - INFO - mse type: +2025-06-28 18:37:19,478 - INFO - mse value: 1.0370538234710693 +2025-06-28 18:37:19,478 - INFO - mse value_with_item: 1.0370538234710693 +2025-06-28 18:37:19,478 - INFO - mae type: +2025-06-28 18:37:19,478 - INFO - mae value: 0.6142085194587708 +2025-06-28 18:37:19,478 - INFO - mae value_with_item: 0.6142085194587708 +2025-06-28 18:37:19,479 - INFO - rel_l1 value: 0.9384238719940186 +2025-06-28 18:37:19,479 - INFO - rel_l1_with_item value: 0.9384238719940186 +2025-06-28 18:37:19,479 - INFO - rel_l2 value: 0.9456447958946228 +2025-06-28 18:37:19,479 - INFO - rel_l2_with_item value: 0.9456447958946228 +2025-06-28 18:37:19,484 - INFO - mse type: +2025-06-28 18:37:19,710 - INFO - mse value: 0.9745650291442871 +2025-06-28 18:37:19,711 - INFO - mse value_with_item: 0.9745650291442871 +2025-06-28 18:37:19,711 - INFO - mae type: +2025-06-28 18:37:19,711 - INFO - 
mae value: 0.5982407927513123 +2025-06-28 18:37:19,711 - INFO - mae value_with_item: 0.5982407927513123 +2025-06-28 18:37:19,711 - INFO - rel_l1 value: 0.9408509731292725 +2025-06-28 18:37:19,712 - INFO - rel_l1_with_item value: 0.9408509731292725 +2025-06-28 18:37:19,712 - INFO - rel_l2 value: 0.9449415802955627 +2025-06-28 18:37:19,712 - INFO - rel_l2_with_item value: 0.9449415802955627 +2025-06-30 14:25:18,222 - INFO - args.exp_name : Train_Test +2025-06-30 14:25:18,227 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-30 14:25:18,227 - INFO - Starting training with 1 GPUs +2025-06-30 14:25:45,431 - INFO - Total trainable parameters: 1437705 +2025-06-30 14:25:45,731 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-30 14:25:45,772 - INFO - Staring training for 12 epochs +2025-06-30 14:26:41,222 - INFO - Epoch 1/12 - Train Loss: 1.253324, Val Loss: 1.140946 +2025-06-30 14:26:41,322 - INFO - New best model saved with Val Loss: 1.140946 +2025-06-30 14:27:12,608 - INFO - Epoch 2/12 - Train Loss: 1.188389, Val Loss: 1.113803 +2025-06-30 14:27:12,671 - INFO - New best model saved with Val Loss: 1.113803 +2025-06-30 14:27:33,990 - INFO - Epoch 3/12 - Train Loss: 1.182415, Val Loss: 1.121538 +2025-06-30 14:27:51,809 - INFO - Epoch 4/12 - Train Loss: 1.184420, Val Loss: 1.125069 +2025-06-30 14:28:10,981 - INFO - Epoch 5/12 - Train Loss: 1.177936, Val Loss: 1.114436 +2025-06-30 14:28:35,485 - INFO - Epoch 6/12 - Train Loss: 1.174998, Val Loss: 1.103332 +2025-06-30 14:28:35,537 - INFO - New best model saved with Val Loss: 1.103332 +2025-06-30 14:28:54,890 - INFO - Epoch 7/12 - 
Train Loss: 1.177706, Val Loss: 1.091069 +2025-06-30 14:28:54,934 - INFO - New best model saved with Val Loss: 1.091069 +2025-06-30 14:29:14,007 - INFO - Epoch 8/12 - Train Loss: 1.176191, Val Loss: 1.076332 +2025-06-30 14:29:14,059 - INFO - New best model saved with Val Loss: 1.076332 +2025-06-30 14:29:47,099 - INFO - Epoch 9/12 - Train Loss: 1.170609, Val Loss: 1.064585 +2025-06-30 14:29:47,152 - INFO - New best model saved with Val Loss: 1.064585 +2025-06-30 14:30:13,224 - INFO - Epoch 10/12 - Train Loss: 1.161243, Val Loss: 1.050977 +2025-06-30 14:30:13,276 - INFO - New best model saved with Val Loss: 1.050977 +2025-06-30 14:30:34,450 - INFO - Epoch 11/12 - Train Loss: 1.173800, Val Loss: 1.018686 +2025-06-30 14:30:34,507 - INFO - New best model saved with Val Loss: 1.018686 +2025-06-30 14:30:52,670 - INFO - Epoch 12/12 - Train Loss: 1.172042, Val Loss: 0.995451 +2025-06-30 14:30:52,716 - INFO - New best model saved with Val Loss: 0.995451 +2025-06-30 14:30:53,130 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-06-30 14:30:53,131 - INFO - Testing the final model +2025-06-30 14:30:59,473 - INFO - normalized_outputs type: +2025-06-30 14:30:59,505 - INFO - normalized_outputs shape: torch.Size([8, 10000]) +2025-06-30 14:30:59,505 - INFO - normalized_targets type: +2025-06-30 14:30:59,505 - INFO - normalized_targets shape: torch.Size([8, 10000]) +2025-06-30 14:30:59,705 - INFO - mse type: +2025-06-30 14:30:59,706 - INFO - mse value: 1.0333396196365356 +2025-06-30 14:30:59,706 - INFO - mse value_with_item: 1.0333396196365356 +2025-06-30 14:30:59,706 - INFO - mae type: +2025-06-30 14:30:59,706 - INFO - mae value: 0.6205043792724609 +2025-06-30 14:30:59,706 - INFO - mae value_with_item: 0.6205043792724609 +2025-06-30 14:30:59,744 - INFO - rel_l1 value: 0.9312472343444824 +2025-06-30 14:30:59,744 - INFO - rel_l1_with_item value: 0.9312472343444824 +2025-06-30 14:30:59,744 - INFO - rel_l2 value: 0.9395401477813721 +2025-06-30 14:30:59,744 - 
INFO - rel_l2_with_item value: 0.9395401477813721 +2025-06-30 14:30:59,811 - INFO - normalized_outputs type: +2025-06-30 14:30:59,812 - INFO - normalized_outputs shape: torch.Size([8, 10000]) +2025-06-30 14:30:59,812 - INFO - normalized_targets type: +2025-06-30 14:30:59,822 - INFO - normalized_targets shape: torch.Size([8, 10000]) +2025-06-30 14:30:59,822 - INFO - mse type: +2025-06-30 14:31:00,037 - INFO - mse value: 1.0370538234710693 +2025-06-30 14:31:00,038 - INFO - mse value_with_item: 1.0370538234710693 +2025-06-30 14:31:00,038 - INFO - mae type: +2025-06-30 14:31:00,038 - INFO - mae value: 0.6142085194587708 +2025-06-30 14:31:00,038 - INFO - mae value_with_item: 0.6142085194587708 +2025-06-30 14:31:00,038 - INFO - rel_l1 value: 0.9384238719940186 +2025-06-30 14:31:00,038 - INFO - rel_l1_with_item value: 0.9384238719940186 +2025-06-30 14:31:00,038 - INFO - rel_l2 value: 0.9456447958946228 +2025-06-30 14:31:00,038 - INFO - rel_l2_with_item value: 0.9456447958946228 +2025-06-30 14:31:00,227 - INFO - normalized_outputs type: +2025-06-30 14:31:00,227 - INFO - normalized_outputs shape: torch.Size([8, 10000]) +2025-06-30 14:31:00,227 - INFO - normalized_targets type: +2025-06-30 14:31:00,227 - INFO - normalized_targets shape: torch.Size([8, 10000]) +2025-06-30 14:31:00,227 - INFO - mse type: +2025-06-30 14:31:00,467 - INFO - mse value: 0.9745650291442871 +2025-06-30 14:31:00,497 - INFO - mse value_with_item: 0.9745650291442871 +2025-06-30 14:31:00,497 - INFO - mae type: +2025-06-30 14:31:00,497 - INFO - mae value: 0.5982407927513123 +2025-06-30 14:31:00,497 - INFO - mae value_with_item: 0.5982407927513123 +2025-06-30 14:31:00,498 - INFO - rel_l1 value: 0.9408509731292725 +2025-06-30 14:31:00,498 - INFO - rel_l1_with_item value: 0.9408509731292725 +2025-06-30 14:31:00,498 - INFO - rel_l2 value: 0.9449415802955627 +2025-06-30 14:31:00,498 - INFO - rel_l2_with_item value: 0.9449415802955627 +2025-06-30 14:45:59,768 - INFO - args.exp_name : Train_Test +2025-06-30 
14:45:59,773 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-30 14:45:59,773 - INFO - Starting training with 1 GPUs +2025-06-30 14:46:18,462 - INFO - Total trainable parameters: 1437705 +2025-06-30 14:46:18,629 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-30 14:46:18,665 - INFO - Staring training for 12 epochs +2025-06-30 14:48:56,460 - INFO - args.exp_name : Train_Test +2025-06-30 14:48:56,483 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-30 14:48:56,483 - INFO - Starting training with 1 GPUs +2025-06-30 14:49:13,574 - INFO - Total trainable parameters: 1437705 +2025-06-30 14:49:13,861 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-30 14:49:13,876 - INFO - Staring training for 12 epochs +2025-06-30 14:49:13,956 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-06-30 14:49:13,959 - INFO - Testing the final model +2025-06-30 14:49:35,414 - INFO - normalized_outputs type: +2025-06-30 15:15:54,168 - INFO - args.exp_name : Train_Test +2025-06-30 15:15:54,192 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', 
subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-30 15:15:54,199 - INFO - Starting training with 1 GPUs +2025-06-30 15:16:04,837 - INFO - Total trainable parameters: 1437705 +2025-06-30 15:16:04,900 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-30 15:16:04,909 - INFO - Staring training for 12 epochs +2025-06-30 15:16:04,949 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-06-30 15:16:04,959 - INFO - Testing the final model +2025-06-30 15:16:19,803 - INFO - normalized_outputs type: +2025-06-30 15:16:19,803 - INFO - normalized_outputs length : 1 +2025-06-30 15:16:20,056 - INFO - normalized_outputs type: +2025-06-30 15:16:20,056 - INFO - normalized_outputs length : 2 +2025-06-30 15:16:20,288 - INFO - normalized_outputs type: +2025-06-30 15:16:20,288 - INFO - normalized_outputs length : 3 +2025-06-30 15:19:06,968 - INFO - args.exp_name : Train_Test +2025-06-30 15:19:06,985 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-30 15:19:06,985 - INFO - Starting training with 1 GPUs +2025-06-30 15:19:16,304 - INFO - Total trainable parameters: 1437705 +2025-06-30 15:19:16,388 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-30 15:19:16,389 - INFO - Staring training for 12 epochs +2025-06-30 15:19:16,454 - INFO - Final model saved to experiments/Train_Test/final_model_pth 
+2025-06-30 15:19:16,462 - INFO - Testing the final model +2025-06-30 15:19:29,598 - INFO - normalized_outputs type: +2025-06-30 15:19:29,640 - INFO - normalized_outputs first 5 value : [tensor([[-0.0004, 0.0395, 0.0787, ..., 0.0295, 0.0384, -0.0151], + [ 0.0676, 0.0940, -0.1913, ..., 0.0342, 0.0119, -0.0270], + [-0.1073, 0.0153, -0.0993, ..., -0.0663, 0.0120, 0.0184], + ..., + [-0.0584, -0.1467, 0.0653, ..., -0.0670, 0.0390, -0.0941], + [-0.1066, -0.1257, -0.0436, ..., 0.0280, -0.1407, -0.0362], + [-0.1631, -0.0101, 0.0420, ..., -0.0108, 0.0685, 0.0153]])] +2025-06-30 15:19:29,872 - INFO - normalized_outputs type: +2025-06-30 15:19:29,874 - INFO - normalized_outputs first 5 value : [tensor([[-0.0004, 0.0395, 0.0787, ..., 0.0295, 0.0384, -0.0151], + [ 0.0676, 0.0940, -0.1913, ..., 0.0342, 0.0119, -0.0270], + [-0.1073, 0.0153, -0.0993, ..., -0.0663, 0.0120, 0.0184], + ..., + [-0.0584, -0.1467, 0.0653, ..., -0.0670, 0.0390, -0.0941], + [-0.1066, -0.1257, -0.0436, ..., 0.0280, -0.1407, -0.0362], + [-0.1631, -0.0101, 0.0420, ..., -0.0108, 0.0685, 0.0153]]), tensor([[-1.3223e-01, 4.0609e-01, -1.0546e-01, ..., 8.2760e-02, + -2.9105e-02, -7.0697e-02], + [ 2.6392e-02, 2.4208e-03, 7.3777e-02, ..., -7.4097e-02, + -7.5070e-02, -1.2867e-01], + [-8.9194e-02, -1.5162e-01, 9.0782e-03, ..., 2.4502e-02, + 3.7299e-02, 4.6280e-02], + ..., + [-1.6333e-02, -9.1008e-02, -1.0734e-01, ..., 9.9977e-03, + 4.0366e-02, 1.9166e-02], + [-3.6702e-02, -8.2186e-02, -9.7411e-02, ..., -8.2859e-02, + -9.1823e-02, -1.0160e-02], + [ 2.6854e-02, -1.2274e-01, -2.8979e-04, ..., -1.3167e-01, + 1.9786e-02, -1.7892e-02]])] +2025-06-30 15:19:30,106 - INFO - normalized_outputs type: +2025-06-30 15:19:30,108 - INFO - normalized_outputs first 5 value : [tensor([[-0.0004, 0.0395, 0.0787, ..., 0.0295, 0.0384, -0.0151], + [ 0.0676, 0.0940, -0.1913, ..., 0.0342, 0.0119, -0.0270], + [-0.1073, 0.0153, -0.0993, ..., -0.0663, 0.0120, 0.0184], + ..., + [-0.0584, -0.1467, 0.0653, ..., -0.0670, 0.0390, -0.0941], + 
[-0.1066, -0.1257, -0.0436, ..., 0.0280, -0.1407, -0.0362], + [-0.1631, -0.0101, 0.0420, ..., -0.0108, 0.0685, 0.0153]]), tensor([[-1.3223e-01, 4.0609e-01, -1.0546e-01, ..., 8.2760e-02, + -2.9105e-02, -7.0697e-02], + [ 2.6392e-02, 2.4208e-03, 7.3777e-02, ..., -7.4097e-02, + -7.5070e-02, -1.2867e-01], + [-8.9194e-02, -1.5162e-01, 9.0782e-03, ..., 2.4502e-02, + 3.7299e-02, 4.6280e-02], + ..., + [-1.6333e-02, -9.1008e-02, -1.0734e-01, ..., 9.9977e-03, + 4.0366e-02, 1.9166e-02], + [-3.6702e-02, -8.2186e-02, -9.7411e-02, ..., -8.2859e-02, + -9.1823e-02, -1.0160e-02], + [ 2.6854e-02, -1.2274e-01, -2.8979e-04, ..., -1.3167e-01, + 1.9786e-02, -1.7892e-02]]), tensor([[ 9.4023e-02, -1.7952e-01, -5.0626e-02, ..., 8.2223e-02, + -2.3529e-03, -4.7622e-02], + [-8.2041e-02, -7.6069e-03, 4.4658e-02, ..., -4.3549e-02, + -5.0403e-02, 1.9752e-02], + [-4.7886e-02, -1.0890e-02, -7.9050e-02, ..., -1.2168e-01, + -7.0545e-02, 1.5359e-01], + ..., + [-8.7113e-02, -1.1998e-01, -1.0086e-01, ..., -1.1805e-01, + -3.3237e-02, -1.0960e-01], + [ 6.0501e-02, 3.9387e-02, -8.4609e-02, ..., 8.6239e-03, + -1.6217e-01, 2.3374e-02], + [ 5.2814e-02, -8.3480e-02, -7.3634e-05, ..., -1.2143e-01, + 1.0725e-02, -3.9310e-02]])] +2025-06-30 15:29:57,305 - INFO - args.exp_name : Train_Test +2025-06-30 15:29:57,353 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-30 15:29:57,353 - INFO - Starting training with 1 GPUs +2025-06-30 15:30:10,243 - INFO - Total trainable parameters: 1437705 +2025-06-30 15:30:10,306 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-30 15:30:10,311 - INFO - Staring training for 12 epochs 
+2025-06-30 15:30:10,330 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-06-30 15:30:10,334 - INFO - Testing the final model +2025-06-30 15:30:24,502 - INFO - Total MSE across all processes: 24.359668731689453 +2025-06-30 16:19:27,843 - INFO - args.exp_name : Train_Test +2025-06-30 16:19:27,891 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-30 16:19:27,891 - INFO - Starting training with 1 GPUs +2025-06-30 16:19:47,594 - INFO - Total trainable parameters: 1437705 +2025-06-30 16:19:47,668 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-30 16:19:47,701 - INFO - Staring training for 12 epochs +2025-06-30 16:24:15,185 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-06-30 16:24:26,595 - INFO - Total MSE across all processes: 24.359668731689453 +2025-06-30 16:24:26,597 - INFO - mean value for all_targets: {tmp} +2025-06-30 16:24:26,599 - INFO - Test MSE: 1.014986, Test MAE: 0.610985, Max MAE: 17.060396, Test R2: 0.1095 +2025-06-30 16:24:26,599 - INFO - Relative L2 Error: 0.943376, Relative L1 error: 0.936841 +2025-06-30 16:24:26,599 - INFO - Total inference time: 0.01s for 24 samples +2025-06-30 16:24:26,600 - INFO - Testing the final model +2025-06-30 16:24:34,020 - INFO - Total MSE across all processes: 24.359668731689453 +2025-06-30 16:24:34,021 - INFO - mean value for all_targets: {tmp} +2025-06-30 16:24:34,022 - INFO - Test MSE: 1.014986, Test MAE: 0.610985, Max MAE: 17.060396, Test R2: 0.1095 +2025-06-30 16:24:34,022 - INFO - Relative L2 Error: 0.943376, Relative L1 error: 0.936841 +2025-06-30 16:24:34,030 - INFO 
- Total inference time: 0.01s for 24 samples +2025-06-30 16:40:49,296 - INFO - args.exp_name : Train_Test +2025-06-30 16:40:49,336 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=8, epochs=12, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-30 16:40:49,336 - INFO - Starting training with 1 GPUs +2025-06-30 16:41:01,051 - INFO - Total trainable parameters: 1437705 +2025-06-30 16:41:01,107 - INFO - Data loaded: 10 training batches, 2 validation batches, 3 test batches +2025-06-30 16:41:01,110 - INFO - Staring training for 12 epochs +2025-06-30 16:41:37,009 - INFO - Epoch 1/12 - Train Loss: 1.253324, Val Loss: 1.140946 +2025-06-30 16:41:37,088 - INFO - New best model saved with Val Loss: 1.140946 +2025-06-30 16:42:02,680 - INFO - Epoch 2/12 - Train Loss: 1.188389, Val Loss: 1.113803 +2025-06-30 16:42:02,725 - INFO - New best model saved with Val Loss: 1.113803 +2025-06-30 16:42:27,807 - INFO - Epoch 3/12 - Train Loss: 1.182415, Val Loss: 1.121538 +2025-06-30 16:42:52,816 - INFO - Epoch 4/12 - Train Loss: 1.184420, Val Loss: 1.125069 +2025-06-30 16:43:10,803 - INFO - Epoch 5/12 - Train Loss: 1.177936, Val Loss: 1.114436 +2025-06-30 16:43:32,894 - INFO - Epoch 6/12 - Train Loss: 1.174998, Val Loss: 1.103332 +2025-06-30 16:43:32,965 - INFO - New best model saved with Val Loss: 1.103332 +2025-06-30 16:43:59,150 - INFO - Epoch 7/12 - Train Loss: 1.177706, Val Loss: 1.091069 +2025-06-30 16:43:59,211 - INFO - New best model saved with Val Loss: 1.091069 +2025-06-30 16:44:24,872 - INFO - Epoch 8/12 - Train Loss: 1.176191, Val Loss: 1.076332 +2025-06-30 16:44:24,936 - INFO - New best model saved with Val Loss: 1.076332 +2025-06-30 16:44:50,255 - INFO - Epoch 9/12 - Train Loss: 
1.170609, Val Loss: 1.064585 +2025-06-30 16:44:50,318 - INFO - New best model saved with Val Loss: 1.064585 +2025-06-30 16:45:15,230 - INFO - Epoch 10/12 - Train Loss: 1.161243, Val Loss: 1.050977 +2025-06-30 16:45:15,291 - INFO - New best model saved with Val Loss: 1.050977 +2025-06-30 16:45:41,679 - INFO - Epoch 11/12 - Train Loss: 1.173800, Val Loss: 1.018686 +2025-06-30 16:45:41,729 - INFO - New best model saved with Val Loss: 1.018686 +2025-06-30 16:46:07,104 - INFO - Epoch 12/12 - Train Loss: 1.172042, Val Loss: 0.995451 +2025-06-30 16:46:07,152 - INFO - New best model saved with Val Loss: 0.995451 +2025-06-30 16:46:07,616 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-06-30 16:46:07,617 - INFO - Testing the final model +2025-06-30 16:46:19,758 - INFO - Total MSE across all processes: 24.359668731689453 +2025-06-30 16:46:19,760 - INFO - mean value for all_targets: {tmp} +2025-06-30 16:46:19,766 - INFO - Test MSE: 1.014986, Test MAE: 0.610985, Max MAE: 17.060396, Test R2: 0.1095 +2025-06-30 16:46:19,766 - INFO - Relative L2 Error: 0.943376, Relative L1 error: 0.936841 +2025-06-30 16:46:19,766 - INFO - Total inference time: 0.01s for 24 samples +2025-06-30 16:46:19,768 - INFO - Testing the best model +2025-06-30 16:46:31,723 - INFO - Total MSE across all processes: 24.359668731689453 +2025-06-30 16:46:31,724 - INFO - mean value for all_targets: {tmp} +2025-06-30 16:46:31,725 - INFO - Test MSE: 1.014986, Test MAE: 0.610985, Max MAE: 17.060396, Test R2: 0.1095 +2025-06-30 16:46:31,725 - INFO - Relative L2 Error: 0.943376, Relative L1 error: 0.936841 +2025-06-30 16:46:31,725 - INFO - Total inference time: 0.01s for 24 samples +2025-06-30 16:51:48,631 - INFO - args.exp_name : Train_Test +2025-06-30 16:51:48,649 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', 
subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, batch_size=12, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-06-30 16:51:48,650 - INFO - Starting training with 1 GPUs +2025-06-30 16:52:05,381 - INFO - Total trainable parameters: 1437705 +2025-06-30 16:52:05,694 - INFO - Data loaded: 19 training batches, 4 validation batches, 4 test batches +2025-06-30 16:52:05,701 - INFO - Staring training for 150 epochs +2025-06-30 16:52:52,236 - INFO - Epoch 1/150 - Train Loss: 1.214562, Val Loss: 1.139650 +2025-06-30 16:52:52,311 - INFO - New best model saved with Val Loss: 1.139650 +2025-06-30 16:53:20,199 - INFO - Epoch 2/150 - Train Loss: 1.159891, Val Loss: 1.142322 +2025-06-30 16:53:47,984 - INFO - Epoch 3/150 - Train Loss: 1.159527, Val Loss: 1.136151 +2025-06-30 16:53:48,043 - INFO - New best model saved with Val Loss: 1.136151 +2025-06-30 16:54:16,527 - INFO - Epoch 4/150 - Train Loss: 1.158275, Val Loss: 1.134893 +2025-06-30 16:54:16,579 - INFO - New best model saved with Val Loss: 1.134893 +2025-06-30 16:54:47,309 - INFO - Epoch 5/150 - Train Loss: 1.158841, Val Loss: 1.130716 +2025-06-30 16:54:47,349 - INFO - New best model saved with Val Loss: 1.130716 +2025-06-30 16:55:14,837 - INFO - Epoch 6/150 - Train Loss: 1.157435, Val Loss: 1.128637 +2025-06-30 16:55:14,851 - INFO - New best model saved with Val Loss: 1.128637 +2025-06-30 16:55:41,294 - INFO - Epoch 7/150 - Train Loss: 1.158527, Val Loss: 1.126826 +2025-06-30 16:55:41,337 - INFO - New best model saved with Val Loss: 1.126826 +2025-06-30 16:56:15,374 - INFO - Epoch 8/150 - Train Loss: 1.159670, Val Loss: 1.127982 +2025-06-30 16:56:48,989 - INFO - Epoch 9/150 - Train Loss: 1.157885, Val Loss: 1.108465 +2025-06-30 16:56:49,056 - INFO - New best model saved with Val Loss: 1.108465 +2025-06-30 16:57:22,691 - INFO - 
Epoch 10/150 - Train Loss: 1.152975, Val Loss: 1.099538 +2025-06-30 16:57:22,745 - INFO - New best model saved with Val Loss: 1.099538 +2025-06-30 16:57:57,392 - INFO - Epoch 11/150 - Train Loss: 1.154798, Val Loss: 1.097098 +2025-06-30 16:57:57,436 - INFO - New best model saved with Val Loss: 1.097098 +2025-06-30 16:58:31,957 - INFO - Epoch 12/150 - Train Loss: 1.151280, Val Loss: 1.071959 +2025-06-30 16:58:32,013 - INFO - New best model saved with Val Loss: 1.071959 +2025-06-30 16:59:06,841 - INFO - Epoch 13/150 - Train Loss: 1.154427, Val Loss: 1.022585 +2025-06-30 16:59:06,871 - INFO - New best model saved with Val Loss: 1.022585 +2025-06-30 16:59:36,631 - INFO - Epoch 14/150 - Train Loss: 1.149373, Val Loss: 1.066904 +2025-06-30 17:00:06,794 - INFO - Epoch 15/150 - Train Loss: 1.153083, Val Loss: 1.044195 +2025-06-30 17:00:39,968 - INFO - Epoch 16/150 - Train Loss: 1.149388, Val Loss: 1.047594 +2025-06-30 17:01:08,692 - INFO - Epoch 17/150 - Train Loss: 1.152448, Val Loss: 1.034870 +2025-06-30 17:01:34,550 - INFO - Epoch 18/150 - Train Loss: 1.151513, Val Loss: 1.050777 +2025-06-30 17:02:04,571 - INFO - Epoch 19/150 - Train Loss: 1.146651, Val Loss: 1.016412 +2025-06-30 17:02:04,625 - INFO - New best model saved with Val Loss: 1.016412 +2025-06-30 17:02:51,972 - INFO - Epoch 20/150 - Train Loss: 1.149560, Val Loss: 1.036892 +2025-06-30 17:03:25,857 - INFO - Epoch 21/150 - Train Loss: 1.152120, Val Loss: 1.037410 +2025-06-30 17:03:58,755 - INFO - Epoch 22/150 - Train Loss: 1.150785, Val Loss: 1.030477 +2025-06-30 17:04:32,711 - INFO - Epoch 23/150 - Train Loss: 1.150990, Val Loss: 1.043045 +2025-06-30 17:05:07,070 - INFO - Epoch 24/150 - Train Loss: 1.151737, Val Loss: 1.018163 +2025-06-30 17:05:41,494 - INFO - Epoch 25/150 - Train Loss: 1.151702, Val Loss: 1.033189 +2025-06-30 17:06:15,997 - INFO - Epoch 26/150 - Train Loss: 1.152441, Val Loss: 1.031626 +2025-06-30 17:06:51,002 - INFO - Epoch 27/150 - Train Loss: 1.151930, Val Loss: 1.030635 +2025-06-30 
17:07:25,442 - INFO - Epoch 28/150 - Train Loss: 1.149480, Val Loss: 1.032951 +2025-06-30 17:08:00,199 - INFO - Epoch 29/150 - Train Loss: 1.149540, Val Loss: 1.027915 +2025-06-30 17:08:34,494 - INFO - Epoch 30/150 - Train Loss: 1.150046, Val Loss: 1.025701 +2025-06-30 17:09:22,222 - INFO - Epoch 31/150 - Train Loss: 1.150857, Val Loss: 1.018049 +2025-06-30 17:09:50,295 - INFO - Epoch 32/150 - Train Loss: 1.148125, Val Loss: 1.013000 +2025-06-30 17:09:50,316 - INFO - New best model saved with Val Loss: 1.013000 +2025-06-30 17:10:18,637 - INFO - Epoch 33/150 - Train Loss: 1.153354, Val Loss: 1.019805 +2025-06-30 17:10:45,338 - INFO - Epoch 34/150 - Train Loss: 1.149081, Val Loss: 1.014312 +2025-06-30 17:11:16,784 - INFO - Epoch 35/150 - Train Loss: 1.151963, Val Loss: 1.019807 +2025-06-30 17:11:43,524 - INFO - Epoch 36/150 - Train Loss: 1.151386, Val Loss: 1.016370 +2025-06-30 17:12:10,975 - INFO - Epoch 37/150 - Train Loss: 1.152770, Val Loss: 1.015124 +2025-06-30 17:12:45,883 - INFO - Epoch 38/150 - Train Loss: 1.145815, Val Loss: 1.010870 +2025-06-30 17:12:45,945 - INFO - New best model saved with Val Loss: 1.010870 +2025-06-30 17:13:20,627 - INFO - Epoch 39/150 - Train Loss: 1.151062, Val Loss: 1.022470 +2025-06-30 17:13:54,343 - INFO - Epoch 40/150 - Train Loss: 1.149539, Val Loss: 1.015680 +2025-06-30 17:14:27,643 - INFO - Epoch 41/150 - Train Loss: 1.150300, Val Loss: 1.021166 +2025-06-30 17:14:59,917 - INFO - Epoch 42/150 - Train Loss: 1.151812, Val Loss: 1.011826 +2025-06-30 17:15:26,756 - INFO - Epoch 43/150 - Train Loss: 1.150189, Val Loss: 1.023766 +2025-06-30 17:15:57,687 - INFO - Epoch 44/150 - Train Loss: 1.151446, Val Loss: 1.012347 +2025-06-30 17:16:30,431 - INFO - Epoch 45/150 - Train Loss: 1.149226, Val Loss: 1.014126 +2025-06-30 17:17:00,758 - INFO - Epoch 46/150 - Train Loss: 1.150818, Val Loss: 1.016673 +2025-06-30 17:17:27,129 - INFO - Epoch 47/150 - Train Loss: 1.152273, Val Loss: 1.016973 +2025-06-30 17:17:53,563 - INFO - Epoch 48/150 - 
Train Loss: 1.152706, Val Loss: 1.013585 +2025-06-30 17:18:20,011 - INFO - Epoch 49/150 - Train Loss: 1.152967, Val Loss: 1.012885 +2025-06-30 17:18:47,411 - INFO - Epoch 50/150 - Train Loss: 1.149793, Val Loss: 1.014034 +2025-06-30 17:19:14,329 - INFO - Epoch 51/150 - Train Loss: 1.151599, Val Loss: 1.014963 +2025-06-30 17:19:40,739 - INFO - Epoch 52/150 - Train Loss: 1.149825, Val Loss: 1.015483 +2025-06-30 17:20:07,043 - INFO - Epoch 53/150 - Train Loss: 1.148759, Val Loss: 1.014243 +2025-06-30 17:20:37,431 - INFO - Epoch 54/150 - Train Loss: 1.149218, Val Loss: 1.014622 +2025-06-30 17:21:06,583 - INFO - Epoch 55/150 - Train Loss: 1.152413, Val Loss: 1.013234 +2025-06-30 17:21:32,561 - INFO - Epoch 56/150 - Train Loss: 1.149147, Val Loss: 1.014769 +2025-06-30 17:21:58,929 - INFO - Epoch 57/150 - Train Loss: 1.151125, Val Loss: 1.014339 +2025-06-30 17:22:25,086 - INFO - Epoch 58/150 - Train Loss: 1.149256, Val Loss: 1.013369 +2025-06-30 17:22:52,282 - INFO - Epoch 59/150 - Train Loss: 1.151146, Val Loss: 1.014887 +2025-06-30 17:23:20,401 - INFO - Epoch 60/150 - Train Loss: 1.151584, Val Loss: 1.014976 +2025-06-30 17:23:47,937 - INFO - Epoch 61/150 - Train Loss: 1.151559, Val Loss: 1.015210 +2025-06-30 17:24:15,019 - INFO - Epoch 62/150 - Train Loss: 1.145339, Val Loss: 1.014917 +2025-06-30 17:24:45,767 - INFO - Epoch 63/150 - Train Loss: 1.152052, Val Loss: 1.014691 +2025-06-30 17:25:16,289 - INFO - Epoch 64/150 - Train Loss: 1.149146, Val Loss: 1.014958 +2025-06-30 17:25:54,057 - INFO - Epoch 65/150 - Train Loss: 1.151268, Val Loss: 1.014413 +2025-06-30 17:26:20,327 - INFO - Epoch 66/150 - Train Loss: 1.152615, Val Loss: 1.014783 +2025-06-30 17:26:52,441 - INFO - Epoch 67/150 - Train Loss: 1.150394, Val Loss: 1.015154 +2025-06-30 17:27:26,489 - INFO - Epoch 68/150 - Train Loss: 1.150520, Val Loss: 1.014982 +2025-06-30 17:27:53,309 - INFO - Epoch 69/150 - Train Loss: 1.149891, Val Loss: 1.015464 +2025-06-30 17:28:21,430 - INFO - Epoch 70/150 - Train Loss: 
1.151592, Val Loss: 1.015131 +2025-06-30 17:28:48,656 - INFO - Epoch 71/150 - Train Loss: 1.150308, Val Loss: 1.014803 +2025-06-30 17:29:14,651 - INFO - Epoch 72/150 - Train Loss: 1.151882, Val Loss: 1.014853 +2025-06-30 17:29:44,110 - INFO - Epoch 73/150 - Train Loss: 1.152950, Val Loss: 1.015332 +2025-06-30 17:30:11,674 - INFO - Epoch 74/150 - Train Loss: 1.148542, Val Loss: 1.014488 +2025-06-30 17:30:40,635 - INFO - Epoch 75/150 - Train Loss: 1.152436, Val Loss: 1.015019 +2025-06-30 17:31:15,162 - INFO - Epoch 76/150 - Train Loss: 1.148143, Val Loss: 1.015147 +2025-06-30 17:31:49,116 - INFO - Epoch 77/150 - Train Loss: 1.153455, Val Loss: 1.014535 +2025-06-30 17:32:18,531 - INFO - Epoch 78/150 - Train Loss: 1.148739, Val Loss: 1.014750 +2025-06-30 17:32:45,246 - INFO - Epoch 79/150 - Train Loss: 1.152679, Val Loss: 1.015313 +2025-06-30 17:33:12,101 - INFO - Epoch 80/150 - Train Loss: 1.151642, Val Loss: 1.015295 +2025-06-30 17:33:38,231 - INFO - Epoch 81/150 - Train Loss: 1.150476, Val Loss: 1.014581 +2025-06-30 17:34:04,738 - INFO - Epoch 82/150 - Train Loss: 1.150879, Val Loss: 1.015313 +2025-06-30 17:34:35,558 - INFO - Epoch 83/150 - Train Loss: 1.146307, Val Loss: 1.014597 +2025-06-30 17:35:09,497 - INFO - Epoch 84/150 - Train Loss: 1.147950, Val Loss: 1.014687 +2025-06-30 17:35:42,730 - INFO - Epoch 85/150 - Train Loss: 1.151794, Val Loss: 1.015187 +2025-06-30 17:36:16,502 - INFO - Epoch 86/150 - Train Loss: 1.148333, Val Loss: 1.015448 +2025-06-30 17:36:49,471 - INFO - Epoch 87/150 - Train Loss: 1.150854, Val Loss: 1.014578 +2025-06-30 17:37:23,156 - INFO - Epoch 88/150 - Train Loss: 1.151282, Val Loss: 1.014658 +2025-06-30 17:37:57,641 - INFO - Epoch 89/150 - Train Loss: 1.147973, Val Loss: 1.015706 +2025-06-30 17:38:31,691 - INFO - Epoch 90/150 - Train Loss: 1.151156, Val Loss: 1.014912 +2025-06-30 17:39:04,718 - INFO - Epoch 91/150 - Train Loss: 1.152580, Val Loss: 1.014372 +2025-06-30 17:39:30,070 - INFO - Epoch 92/150 - Train Loss: 1.150741, Val Loss: 
1.014595 +2025-06-30 17:39:57,261 - INFO - Epoch 93/150 - Train Loss: 1.148515, Val Loss: 1.014537 +2025-06-30 17:40:23,429 - INFO - Epoch 94/150 - Train Loss: 1.151005, Val Loss: 1.015277 +2025-06-30 17:40:50,227 - INFO - Epoch 95/150 - Train Loss: 1.153723, Val Loss: 1.015026 +2025-06-30 17:41:20,621 - INFO - Epoch 96/150 - Train Loss: 1.149982, Val Loss: 1.015000 +2025-06-30 17:42:00,621 - INFO - Epoch 97/150 - Train Loss: 1.153451, Val Loss: 1.014857 +2025-06-30 17:42:31,195 - INFO - Epoch 98/150 - Train Loss: 1.151845, Val Loss: 1.014428 +2025-06-30 17:43:01,146 - INFO - Epoch 99/150 - Train Loss: 1.146699, Val Loss: 1.015603 +2025-06-30 17:43:33,414 - INFO - Epoch 100/150 - Train Loss: 1.149745, Val Loss: 1.015130 +2025-06-30 17:44:07,836 - INFO - Epoch 101/150 - Train Loss: 1.151851, Val Loss: 1.015183 +2025-06-30 17:44:48,766 - INFO - Epoch 102/150 - Train Loss: 1.151895, Val Loss: 1.015093 +2025-06-30 17:45:22,267 - INFO - Epoch 103/150 - Train Loss: 1.152467, Val Loss: 1.015152 +2025-06-30 17:45:51,202 - INFO - Epoch 104/150 - Train Loss: 1.153332, Val Loss: 1.015086 +2025-06-30 17:46:18,307 - INFO - Epoch 105/150 - Train Loss: 1.153229, Val Loss: 1.014584 +2025-06-30 17:46:46,023 - INFO - Epoch 106/150 - Train Loss: 1.150959, Val Loss: 1.015254 +2025-06-30 17:47:15,066 - INFO - Epoch 107/150 - Train Loss: 1.150141, Val Loss: 1.014739 +2025-06-30 17:47:47,948 - INFO - Epoch 108/150 - Train Loss: 1.149630, Val Loss: 1.013919 +2025-06-30 17:48:22,245 - INFO - Epoch 109/150 - Train Loss: 1.151386, Val Loss: 1.015281 +2025-06-30 17:48:57,208 - INFO - Epoch 110/150 - Train Loss: 1.152757, Val Loss: 1.015360 +2025-06-30 17:49:32,081 - INFO - Epoch 111/150 - Train Loss: 1.152244, Val Loss: 1.014822 +2025-06-30 17:50:06,866 - INFO - Epoch 112/150 - Train Loss: 1.149596, Val Loss: 1.014999 +2025-06-30 17:50:41,306 - INFO - Epoch 113/150 - Train Loss: 1.151858, Val Loss: 1.015705 +2025-06-30 17:51:14,828 - INFO - Epoch 114/150 - Train Loss: 1.151614, Val Loss: 
1.014928 +2025-06-30 17:51:48,589 - INFO - Epoch 115/150 - Train Loss: 1.152609, Val Loss: 1.014629 +2025-06-30 17:52:22,241 - INFO - Epoch 116/150 - Train Loss: 1.151527, Val Loss: 1.014652 +2025-06-30 17:52:56,074 - INFO - Epoch 117/150 - Train Loss: 1.149109, Val Loss: 1.014678 +2025-06-30 17:53:29,399 - INFO - Epoch 118/150 - Train Loss: 1.152869, Val Loss: 1.014989 +2025-06-30 17:54:02,238 - INFO - Epoch 119/150 - Train Loss: 1.150167, Val Loss: 1.014000 +2025-06-30 17:54:35,937 - INFO - Epoch 120/150 - Train Loss: 1.151869, Val Loss: 1.014707 +2025-06-30 17:55:09,869 - INFO - Epoch 121/150 - Train Loss: 1.152079, Val Loss: 1.015041 +2025-06-30 17:55:37,894 - INFO - Epoch 122/150 - Train Loss: 1.151798, Val Loss: 1.014780 +2025-06-30 17:56:11,460 - INFO - Epoch 123/150 - Train Loss: 1.150301, Val Loss: 1.014770 +2025-06-30 17:56:45,017 - INFO - Epoch 124/150 - Train Loss: 1.154219, Val Loss: 1.015151 +2025-06-30 17:57:18,827 - INFO - Epoch 125/150 - Train Loss: 1.148334, Val Loss: 1.014790 +2025-06-30 17:57:52,234 - INFO - Epoch 126/150 - Train Loss: 1.151422, Val Loss: 1.015385 +2025-06-30 17:58:25,448 - INFO - Epoch 127/150 - Train Loss: 1.151254, Val Loss: 1.015309 +2025-06-30 17:59:02,047 - INFO - Epoch 128/150 - Train Loss: 1.153110, Val Loss: 1.014931 +2025-06-30 17:59:36,800 - INFO - Epoch 129/150 - Train Loss: 1.150866, Val Loss: 1.014606 +2025-06-30 18:00:28,487 - INFO - Epoch 130/150 - Train Loss: 1.150708, Val Loss: 1.015156 +2025-06-30 18:00:55,497 - INFO - Epoch 131/150 - Train Loss: 1.151088, Val Loss: 1.015073 +2025-06-30 18:01:22,240 - INFO - Epoch 132/150 - Train Loss: 1.150347, Val Loss: 1.015175 +2025-06-30 18:01:49,458 - INFO - Epoch 133/150 - Train Loss: 1.151725, Val Loss: 1.014874 +2025-06-30 18:02:16,363 - INFO - Epoch 134/150 - Train Loss: 1.150342, Val Loss: 1.014693 +2025-06-30 18:02:41,978 - INFO - Epoch 135/150 - Train Loss: 1.147962, Val Loss: 1.015001 +2025-06-30 18:03:08,055 - INFO - Epoch 136/150 - Train Loss: 1.148906, Val 
Loss: 1.014719 +2025-06-30 18:03:35,334 - INFO - Epoch 137/150 - Train Loss: 1.150771, Val Loss: 1.014664 +2025-06-30 18:04:06,016 - INFO - Epoch 138/150 - Train Loss: 1.149895, Val Loss: 1.014495 +2025-06-30 18:04:39,633 - INFO - Epoch 139/150 - Train Loss: 1.149676, Val Loss: 1.015222 +2025-06-30 18:05:11,462 - INFO - Epoch 140/150 - Train Loss: 1.149190, Val Loss: 1.015104 +2025-06-30 18:05:38,234 - INFO - Epoch 141/150 - Train Loss: 1.149869, Val Loss: 1.015008 +2025-06-30 18:06:09,043 - INFO - Epoch 142/150 - Train Loss: 1.149840, Val Loss: 1.014385 +2025-06-30 18:06:42,399 - INFO - Epoch 143/150 - Train Loss: 1.151804, Val Loss: 1.014938 +2025-06-30 18:07:15,754 - INFO - Epoch 144/150 - Train Loss: 1.148924, Val Loss: 1.015386 +2025-06-30 18:07:48,489 - INFO - Epoch 145/150 - Train Loss: 1.150310, Val Loss: 1.015137 +2025-06-30 18:08:19,089 - INFO - Epoch 146/150 - Train Loss: 1.150626, Val Loss: 1.015166 +2025-06-30 18:08:45,484 - INFO - Epoch 147/150 - Train Loss: 1.151395, Val Loss: 1.014767 +2025-06-30 18:09:16,880 - INFO - Epoch 148/150 - Train Loss: 1.151253, Val Loss: 1.014992 +2025-06-30 18:09:50,882 - INFO - Epoch 149/150 - Train Loss: 1.151055, Val Loss: 1.015121 +2025-06-30 18:10:19,950 - INFO - Epoch 150/150 - Train Loss: 1.147844, Val Loss: 1.014008 +2025-06-30 18:10:20,318 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-06-30 18:10:20,357 - INFO - Testing the final model +2025-06-30 18:10:29,521 - INFO - Total MSE across all processes: 47.99393844604492 +2025-06-30 18:10:29,524 - INFO - mean value for all_targets: {tmp} +2025-06-30 18:10:29,527 - INFO - Test MSE: 0.999874, Test MAE: 0.609285, Max MAE: 20.959023, Test R2: 0.1096 +2025-06-30 18:10:29,527 - INFO - Relative L2 Error: 0.943193, Relative L1 error: 0.939679 +2025-06-30 18:10:29,527 - INFO - Total inference time: 0.02s for 48 samples +2025-06-30 18:10:29,529 - INFO - Testing the best model +2025-06-30 18:10:38,699 - INFO - Total MSE across all processes: 
47.9373664855957 +2025-06-30 18:10:38,700 - INFO - mean value for all_targets: {tmp} +2025-06-30 18:10:38,702 - INFO - Test MSE: 0.998695, Test MAE: 0.608575, Max MAE: 20.926542, Test R2: 0.1107 +2025-06-30 18:10:38,702 - INFO - Relative L2 Error: 0.942515, Relative L1 error: 0.938526 +2025-06-30 18:10:38,702 - INFO - Total inference time: 0.02s for 48 samples +2025-07-01 13:07:17,378 - INFO - args.exp_name : Train_Test +2025-07-01 13:07:17,379 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, batch_size=12, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-01 13:07:17,383 - INFO - Starting training with 1 GPUs +2025-07-02 11:01:52,608 - INFO - args.exp_name : Train_Test +2025-07-02 11:01:52,610 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, batch_size=6, epochs=20, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-02 11:01:52,611 - INFO - Starting training with 1 GPUs +2025-07-02 11:01:54,881 - INFO - Total trainable parameters: 1437705 +2025-07-02 11:01:55,052 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-02 11:01:55,054 - INFO - Staring training for 20 epochs +2025-07-02 11:02:41,337 - INFO - Epoch 1/20 - Train Loss: 1.189811, Val Loss: 1.150744 +2025-07-02 11:02:41,358 - INFO - New best model saved with Val Loss: 
1.150744 +2025-07-02 11:03:26,357 - INFO - Epoch 2/20 - Train Loss: 1.155146, Val Loss: 1.089376 +2025-07-02 11:03:26,376 - INFO - New best model saved with Val Loss: 1.089376 +2025-07-02 11:04:11,435 - INFO - Epoch 3/20 - Train Loss: 1.150646, Val Loss: 1.079947 +2025-07-02 11:04:11,454 - INFO - New best model saved with Val Loss: 1.079947 +2025-07-02 11:04:56,385 - INFO - Epoch 4/20 - Train Loss: 1.142872, Val Loss: 0.970974 +2025-07-02 11:04:56,403 - INFO - New best model saved with Val Loss: 0.970974 +2025-07-02 11:05:42,052 - INFO - Epoch 5/20 - Train Loss: 1.139206, Val Loss: 0.975353 +2025-07-02 11:06:26,939 - INFO - Epoch 6/20 - Train Loss: 1.135798, Val Loss: 0.916148 +2025-07-02 11:06:26,958 - INFO - New best model saved with Val Loss: 0.916148 +2025-07-02 11:07:11,961 - INFO - Epoch 7/20 - Train Loss: 1.137600, Val Loss: 0.967226 +2025-07-02 11:07:56,974 - INFO - Epoch 8/20 - Train Loss: 1.136670, Val Loss: 0.933321 +2025-07-02 11:08:41,797 - INFO - Epoch 9/20 - Train Loss: 1.135825, Val Loss: 0.914879 +2025-07-02 11:08:41,815 - INFO - New best model saved with Val Loss: 0.914879 +2025-07-02 11:09:26,590 - INFO - Epoch 10/20 - Train Loss: 1.135171, Val Loss: 0.883284 +2025-07-02 11:09:26,608 - INFO - New best model saved with Val Loss: 0.883284 +2025-07-02 11:10:11,535 - INFO - Epoch 11/20 - Train Loss: 1.133573, Val Loss: 0.897518 +2025-07-02 11:10:56,213 - INFO - Epoch 12/20 - Train Loss: 1.129549, Val Loss: 0.906144 +2025-07-02 11:11:40,861 - INFO - Epoch 13/20 - Train Loss: 1.133971, Val Loss: 0.902175 +2025-07-02 11:12:25,518 - INFO - Epoch 14/20 - Train Loss: 1.132658, Val Loss: 0.885658 +2025-07-02 11:13:10,196 - INFO - Epoch 15/20 - Train Loss: 1.132081, Val Loss: 0.859638 +2025-07-02 11:13:10,214 - INFO - New best model saved with Val Loss: 0.859638 +2025-07-02 11:13:54,945 - INFO - Epoch 16/20 - Train Loss: 1.132230, Val Loss: 0.887372 +2025-07-02 11:14:39,641 - INFO - Epoch 17/20 - Train Loss: 1.130843, Val Loss: 0.868037 +2025-07-02 
11:15:24,444 - INFO - Epoch 18/20 - Train Loss: 1.130759, Val Loss: 0.848050 +2025-07-02 11:15:24,461 - INFO - New best model saved with Val Loss: 0.848050 +2025-07-02 11:16:09,136 - INFO - Epoch 19/20 - Train Loss: 1.131286, Val Loss: 0.849644 +2025-07-02 11:16:53,846 - INFO - Epoch 20/20 - Train Loss: 1.131221, Val Loss: 0.863135 +2025-07-02 11:16:54,013 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-07-02 11:16:54,014 - INFO - Testing the final model +2025-07-02 11:17:00,139 - INFO - Total MSE across all processes: 45.7446174621582 +2025-07-02 11:17:00,142 - INFO - mean value for all_targets: {tmp} +2025-07-02 20:11:12,558 - INFO - args.exp_name : Train_Test +2025-07-02 20:11:12,559 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-02 20:11:12,560 - INFO - Starting training with 1 GPUs +2025-07-02 20:11:18,258 - INFO - Total trainable parameters: 1437705 +2025-07-02 20:11:18,408 - INFO - Data loaded: 14 training batches, 3 validation batches, 4 test batches +2025-07-02 20:11:18,411 - INFO - Staring training for 150 epochs +2025-07-04 08:52:20,030 - INFO - args.exp_name : Train_Test +2025-07-04 08:52:20,036 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 08:52:20,036 - INFO - Starting training with 1 GPUs +2025-07-04 
08:52:28,425 - INFO - Total trainable parameters: 1437705 +2025-07-04 08:52:28,524 - INFO - Data loaded: 14 training batches, 3 validation batches, 4 test batches +2025-07-04 08:52:28,526 - INFO - Staring training for 150 epochs +2025-07-04 08:52:41,547 - INFO - Epoch 1/150 - Train Loss: 1.241446, Val Loss: 1.177541 +2025-07-04 08:52:41,567 - INFO - New best model saved with Val Loss: 1.177541 +2025-07-04 08:52:51,204 - INFO - Epoch 2/150 - Train Loss: 1.186031, Val Loss: 1.122562 +2025-07-04 08:52:51,219 - INFO - New best model saved with Val Loss: 1.122562 +2025-07-04 08:53:00,904 - INFO - Epoch 3/150 - Train Loss: 1.179019, Val Loss: 1.108872 +2025-07-04 08:53:00,919 - INFO - New best model saved with Val Loss: 1.108872 +2025-07-04 08:53:10,568 - INFO - Epoch 4/150 - Train Loss: 1.178043, Val Loss: 1.094974 +2025-07-04 08:53:10,583 - INFO - New best model saved with Val Loss: 1.094974 +2025-07-04 08:53:20,216 - INFO - Epoch 5/150 - Train Loss: 1.174424, Val Loss: 1.076401 +2025-07-04 08:53:20,231 - INFO - New best model saved with Val Loss: 1.076401 +2025-07-04 08:53:29,865 - INFO - Epoch 6/150 - Train Loss: 1.172814, Val Loss: 1.058396 +2025-07-04 08:53:29,880 - INFO - New best model saved with Val Loss: 1.058396 +2025-07-04 08:53:39,495 - INFO - Epoch 7/150 - Train Loss: 1.173182, Val Loss: 1.028594 +2025-07-04 08:53:39,510 - INFO - New best model saved with Val Loss: 1.028594 +2025-07-04 08:53:49,153 - INFO - Epoch 8/150 - Train Loss: 1.170327, Val Loss: 1.008587 +2025-07-04 08:53:49,168 - INFO - New best model saved with Val Loss: 1.008587 +2025-07-04 08:53:58,792 - INFO - Epoch 9/150 - Train Loss: 1.165756, Val Loss: 1.012987 +2025-07-04 08:54:08,437 - INFO - Epoch 10/150 - Train Loss: 1.163875, Val Loss: 0.976703 +2025-07-04 08:54:08,452 - INFO - New best model saved with Val Loss: 0.976703 +2025-07-04 08:54:18,326 - INFO - Epoch 11/150 - Train Loss: 1.162383, Val Loss: 0.957697 +2025-07-04 08:54:18,341 - INFO - New best model saved with Val Loss: 0.957697 
+2025-07-04 08:54:27,965 - INFO - Epoch 12/150 - Train Loss: 1.160220, Val Loss: 0.902563 +2025-07-04 08:54:27,979 - INFO - New best model saved with Val Loss: 0.902563 +2025-07-04 08:54:37,617 - INFO - Epoch 13/150 - Train Loss: 1.156775, Val Loss: 0.870499 +2025-07-04 08:54:37,632 - INFO - New best model saved with Val Loss: 0.870499 +2025-07-04 08:54:47,264 - INFO - Epoch 14/150 - Train Loss: 1.155465, Val Loss: 0.899167 +2025-07-04 08:54:56,908 - INFO - Epoch 15/150 - Train Loss: 1.157795, Val Loss: 0.917017 +2025-07-04 08:55:06,538 - INFO - Epoch 16/150 - Train Loss: 1.157543, Val Loss: 0.873709 +2025-07-04 08:55:16,192 - INFO - Epoch 17/150 - Train Loss: 1.156704, Val Loss: 0.897988 +2025-07-04 08:55:25,840 - INFO - Epoch 18/150 - Train Loss: 1.157150, Val Loss: 0.910194 +2025-07-04 08:55:35,489 - INFO - Epoch 19/150 - Train Loss: 1.155878, Val Loss: 0.874251 +2025-07-04 08:55:45,111 - INFO - Epoch 20/150 - Train Loss: 1.155908, Val Loss: 0.879709 +2025-07-04 08:55:54,845 - INFO - Epoch 21/150 - Train Loss: 1.155028, Val Loss: 0.908767 +2025-07-04 08:56:04,469 - INFO - Epoch 22/150 - Train Loss: 1.154765, Val Loss: 0.880731 +2025-07-04 08:56:14,100 - INFO - Epoch 23/150 - Train Loss: 1.155112, Val Loss: 0.878649 +2025-07-04 08:56:23,703 - INFO - Epoch 24/150 - Train Loss: 1.153464, Val Loss: 0.924204 +2025-07-04 08:56:33,328 - INFO - Epoch 25/150 - Train Loss: 1.152233, Val Loss: 0.866455 +2025-07-04 08:56:33,343 - INFO - New best model saved with Val Loss: 0.866455 +2025-07-04 08:56:42,961 - INFO - Epoch 26/150 - Train Loss: 1.153581, Val Loss: 0.881249 +2025-07-04 08:56:52,576 - INFO - Epoch 27/150 - Train Loss: 1.152243, Val Loss: 0.880733 +2025-07-04 08:57:02,192 - INFO - Epoch 28/150 - Train Loss: 1.152568, Val Loss: 0.878158 +2025-07-04 08:57:11,796 - INFO - Epoch 29/150 - Train Loss: 1.152290, Val Loss: 0.874380 +2025-07-04 08:57:21,424 - INFO - Epoch 30/150 - Train Loss: 1.152829, Val Loss: 0.875143 +2025-07-04 08:57:31,164 - INFO - Epoch 31/150 - 
Train Loss: 1.140987, Val Loss: 0.875324 +2025-07-04 08:57:40,777 - INFO - Epoch 32/150 - Train Loss: 1.152547, Val Loss: 0.878501 +2025-07-04 08:57:50,394 - INFO - Epoch 33/150 - Train Loss: 1.153246, Val Loss: 0.878727 +2025-07-04 08:57:59,994 - INFO - Epoch 34/150 - Train Loss: 1.154446, Val Loss: 0.871191 +2025-07-04 08:58:09,611 - INFO - Epoch 35/150 - Train Loss: 1.154074, Val Loss: 0.874708 +2025-07-04 08:58:19,222 - INFO - Epoch 36/150 - Train Loss: 1.151375, Val Loss: 0.872105 +2025-07-04 08:58:28,828 - INFO - Epoch 37/150 - Train Loss: 1.154260, Val Loss: 0.873454 +2025-07-04 08:58:38,443 - INFO - Epoch 38/150 - Train Loss: 1.153229, Val Loss: 0.875780 +2025-07-04 08:58:48,062 - INFO - Epoch 39/150 - Train Loss: 1.150356, Val Loss: 0.875329 +2025-07-04 08:58:57,699 - INFO - Epoch 40/150 - Train Loss: 1.153559, Val Loss: 0.875707 +2025-07-04 08:59:07,426 - INFO - Epoch 41/150 - Train Loss: 1.153825, Val Loss: 0.876817 +2025-07-04 08:59:17,053 - INFO - Epoch 42/150 - Train Loss: 1.152899, Val Loss: 0.875426 +2025-07-04 08:59:26,665 - INFO - Epoch 43/150 - Train Loss: 1.154966, Val Loss: 0.875305 +2025-07-04 08:59:36,320 - INFO - Epoch 44/150 - Train Loss: 1.152189, Val Loss: 0.874278 +2025-07-04 08:59:45,942 - INFO - Epoch 45/150 - Train Loss: 1.153907, Val Loss: 0.874331 +2025-07-04 08:59:55,604 - INFO - Epoch 46/150 - Train Loss: 1.153043, Val Loss: 0.875236 +2025-07-04 09:00:05,247 - INFO - Epoch 47/150 - Train Loss: 1.154773, Val Loss: 0.873472 +2025-07-04 09:00:14,904 - INFO - Epoch 48/150 - Train Loss: 1.153229, Val Loss: 0.872523 +2025-07-04 09:00:24,538 - INFO - Epoch 49/150 - Train Loss: 1.153976, Val Loss: 0.873105 +2025-07-04 09:00:34,170 - INFO - Epoch 50/150 - Train Loss: 1.152770, Val Loss: 0.873154 +2025-07-04 09:00:43,893 - INFO - Epoch 51/150 - Train Loss: 1.152427, Val Loss: 0.873966 +2025-07-04 09:00:53,532 - INFO - Epoch 52/150 - Train Loss: 1.152633, Val Loss: 0.872660 +2025-07-04 09:01:03,172 - INFO - Epoch 53/150 - Train Loss: 
1.153153, Val Loss: 0.873497 +2025-07-04 09:01:12,807 - INFO - Epoch 54/150 - Train Loss: 1.152727, Val Loss: 0.873510 +2025-07-04 09:01:22,434 - INFO - Epoch 55/150 - Train Loss: 1.150634, Val Loss: 0.873938 +2025-07-04 09:01:32,068 - INFO - Epoch 56/150 - Train Loss: 1.152500, Val Loss: 0.874070 +2025-07-04 09:01:41,675 - INFO - Epoch 57/150 - Train Loss: 1.151416, Val Loss: 0.873577 +2025-07-04 09:01:51,238 - INFO - Epoch 58/150 - Train Loss: 1.153988, Val Loss: 0.873091 +2025-07-04 09:02:00,845 - INFO - Epoch 59/150 - Train Loss: 1.152799, Val Loss: 0.873124 +2025-07-04 09:02:10,479 - INFO - Epoch 60/150 - Train Loss: 1.153275, Val Loss: 0.873418 +2025-07-04 09:02:20,217 - INFO - Epoch 61/150 - Train Loss: 1.154078, Val Loss: 0.873387 +2025-07-04 09:02:29,835 - INFO - Epoch 62/150 - Train Loss: 1.152570, Val Loss: 0.872664 +2025-07-04 09:02:39,458 - INFO - Epoch 63/150 - Train Loss: 1.152630, Val Loss: 0.873286 +2025-07-04 09:02:49,085 - INFO - Epoch 64/150 - Train Loss: 1.151116, Val Loss: 0.873913 +2025-07-04 09:02:58,687 - INFO - Epoch 65/150 - Train Loss: 1.152723, Val Loss: 0.873274 +2025-07-04 09:03:08,300 - INFO - Epoch 66/150 - Train Loss: 1.151570, Val Loss: 0.872757 +2025-07-04 09:03:17,924 - INFO - Epoch 67/150 - Train Loss: 1.152477, Val Loss: 0.873092 +2025-07-04 09:03:27,548 - INFO - Epoch 68/150 - Train Loss: 1.150931, Val Loss: 0.873222 +2025-07-04 09:03:37,172 - INFO - Epoch 69/150 - Train Loss: 1.151670, Val Loss: 0.873713 +2025-07-04 09:03:46,815 - INFO - Epoch 70/150 - Train Loss: 1.153419, Val Loss: 0.873722 +2025-07-04 09:03:56,546 - INFO - Epoch 71/150 - Train Loss: 1.154862, Val Loss: 0.874301 +2025-07-04 09:04:06,136 - INFO - Epoch 72/150 - Train Loss: 1.152769, Val Loss: 0.873145 +2025-07-04 09:04:15,748 - INFO - Epoch 73/150 - Train Loss: 1.153697, Val Loss: 0.873199 +2025-07-04 09:04:25,355 - INFO - Epoch 74/150 - Train Loss: 1.151516, Val Loss: 0.874258 +2025-07-04 09:04:34,995 - INFO - Epoch 75/150 - Train Loss: 1.152371, Val Loss: 
0.873783 +2025-07-04 09:04:44,652 - INFO - Epoch 76/150 - Train Loss: 1.152795, Val Loss: 0.873676 +2025-07-04 09:04:54,304 - INFO - Epoch 77/150 - Train Loss: 1.152586, Val Loss: 0.873685 +2025-07-04 09:05:03,930 - INFO - Epoch 78/150 - Train Loss: 1.154487, Val Loss: 0.873479 +2025-07-04 09:05:13,524 - INFO - Epoch 79/150 - Train Loss: 1.152081, Val Loss: 0.873248 +2025-07-04 09:05:23,144 - INFO - Epoch 80/150 - Train Loss: 1.153493, Val Loss: 0.873805 +2025-07-04 09:05:32,899 - INFO - Epoch 81/150 - Train Loss: 1.152695, Val Loss: 0.873204 +2025-07-04 09:05:42,528 - INFO - Epoch 82/150 - Train Loss: 1.152202, Val Loss: 0.873357 +2025-07-04 09:05:52,177 - INFO - Epoch 83/150 - Train Loss: 1.154032, Val Loss: 0.874270 +2025-07-04 09:06:01,780 - INFO - Epoch 84/150 - Train Loss: 1.152854, Val Loss: 0.873772 +2025-07-04 09:06:11,403 - INFO - Epoch 85/150 - Train Loss: 1.152796, Val Loss: 0.873991 +2025-07-04 09:06:21,006 - INFO - Epoch 86/150 - Train Loss: 1.152520, Val Loss: 0.873427 +2025-07-04 09:06:30,615 - INFO - Epoch 87/150 - Train Loss: 1.151348, Val Loss: 0.874311 +2025-07-04 09:06:40,243 - INFO - Epoch 88/150 - Train Loss: 1.153036, Val Loss: 0.873612 +2025-07-04 09:06:49,878 - INFO - Epoch 89/150 - Train Loss: 1.152249, Val Loss: 0.873483 +2025-07-04 09:06:59,520 - INFO - Epoch 90/150 - Train Loss: 1.152378, Val Loss: 0.873306 +2025-07-04 09:07:09,282 - INFO - Epoch 91/150 - Train Loss: 1.154740, Val Loss: 0.873980 +2025-07-04 09:07:18,914 - INFO - Epoch 92/150 - Train Loss: 1.153242, Val Loss: 0.873601 +2025-07-04 09:07:28,541 - INFO - Epoch 93/150 - Train Loss: 1.152556, Val Loss: 0.873785 +2025-07-04 09:07:38,185 - INFO - Epoch 94/150 - Train Loss: 1.152815, Val Loss: 0.873088 +2025-07-04 09:07:47,836 - INFO - Epoch 95/150 - Train Loss: 1.154911, Val Loss: 0.873480 +2025-07-04 09:07:57,489 - INFO - Epoch 96/150 - Train Loss: 1.151365, Val Loss: 0.873631 +2025-07-04 09:08:07,149 - INFO - Epoch 97/150 - Train Loss: 1.154766, Val Loss: 0.874919 
+2025-07-04 09:08:16,771 - INFO - Epoch 98/150 - Train Loss: 1.152729, Val Loss: 0.874305 +2025-07-04 09:08:26,397 - INFO - Epoch 99/150 - Train Loss: 1.153715, Val Loss: 0.873401 +2025-07-04 09:08:36,032 - INFO - Epoch 100/150 - Train Loss: 1.152547, Val Loss: 0.873287 +2025-07-04 09:08:45,767 - INFO - Epoch 101/150 - Train Loss: 1.152440, Val Loss: 0.872898 +2025-07-04 09:08:55,373 - INFO - Epoch 102/150 - Train Loss: 1.152278, Val Loss: 0.872688 +2025-07-04 09:09:04,998 - INFO - Epoch 103/150 - Train Loss: 1.152758, Val Loss: 0.872871 +2025-07-04 09:09:14,622 - INFO - Epoch 104/150 - Train Loss: 1.152842, Val Loss: 0.873401 +2025-07-04 09:09:24,264 - INFO - Epoch 105/150 - Train Loss: 1.153891, Val Loss: 0.873807 +2025-07-04 09:09:33,897 - INFO - Epoch 106/150 - Train Loss: 1.153532, Val Loss: 0.874173 +2025-07-04 09:09:43,523 - INFO - Epoch 107/150 - Train Loss: 1.152759, Val Loss: 0.873700 +2025-07-04 09:09:53,144 - INFO - Epoch 108/150 - Train Loss: 1.153455, Val Loss: 0.872497 +2025-07-04 09:10:02,716 - INFO - Epoch 109/150 - Train Loss: 1.153009, Val Loss: 0.873867 +2025-07-04 09:10:12,336 - INFO - Epoch 110/150 - Train Loss: 1.154003, Val Loss: 0.873587 +2025-07-04 09:10:22,051 - INFO - Epoch 111/150 - Train Loss: 1.153342, Val Loss: 0.873033 +2025-07-04 09:10:31,692 - INFO - Epoch 112/150 - Train Loss: 1.154682, Val Loss: 0.874066 +2025-07-04 09:10:41,304 - INFO - Epoch 113/150 - Train Loss: 1.154469, Val Loss: 0.874227 +2025-07-04 09:10:50,916 - INFO - Epoch 114/150 - Train Loss: 1.152259, Val Loss: 0.873901 +2025-07-04 09:11:00,545 - INFO - Epoch 115/150 - Train Loss: 1.154351, Val Loss: 0.873536 +2025-07-04 09:11:10,150 - INFO - Epoch 116/150 - Train Loss: 1.151255, Val Loss: 0.873927 +2025-07-04 09:11:19,770 - INFO - Epoch 117/150 - Train Loss: 1.153796, Val Loss: 0.874327 +2025-07-04 09:11:29,393 - INFO - Epoch 118/150 - Train Loss: 1.151567, Val Loss: 0.873807 +2025-07-04 09:11:39,037 - INFO - Epoch 119/150 - Train Loss: 1.150924, Val Loss: 0.873749 
+2025-07-04 09:11:48,649 - INFO - Epoch 120/150 - Train Loss: 1.152751, Val Loss: 0.873765 +2025-07-04 09:11:58,385 - INFO - Epoch 121/150 - Train Loss: 1.152423, Val Loss: 0.872958 +2025-07-04 09:12:08,003 - INFO - Epoch 122/150 - Train Loss: 1.152789, Val Loss: 0.873576 +2025-07-04 09:12:17,592 - INFO - Epoch 123/150 - Train Loss: 1.154129, Val Loss: 0.873808 +2025-07-04 09:12:27,202 - INFO - Epoch 124/150 - Train Loss: 1.151936, Val Loss: 0.873762 +2025-07-04 09:12:36,834 - INFO - Epoch 125/150 - Train Loss: 1.152352, Val Loss: 0.873750 +2025-07-04 09:12:46,484 - INFO - Epoch 126/150 - Train Loss: 1.155440, Val Loss: 0.873888 +2025-07-04 09:12:56,138 - INFO - Epoch 127/150 - Train Loss: 1.153147, Val Loss: 0.873769 +2025-07-04 09:13:05,770 - INFO - Epoch 128/150 - Train Loss: 1.151241, Val Loss: 0.874397 +2025-07-04 09:13:15,391 - INFO - Epoch 129/150 - Train Loss: 1.153389, Val Loss: 0.873702 +2025-07-04 09:13:25,023 - INFO - Epoch 130/150 - Train Loss: 1.154868, Val Loss: 0.873116 +2025-07-04 09:13:34,778 - INFO - Epoch 131/150 - Train Loss: 1.152740, Val Loss: 0.873281 +2025-07-04 09:13:44,427 - INFO - Epoch 132/150 - Train Loss: 1.152790, Val Loss: 0.873503 +2025-07-04 09:13:54,049 - INFO - Epoch 133/150 - Train Loss: 1.153289, Val Loss: 0.873433 +2025-07-04 09:14:03,687 - INFO - Epoch 134/150 - Train Loss: 1.152448, Val Loss: 0.873979 +2025-07-04 09:14:13,339 - INFO - Epoch 135/150 - Train Loss: 1.153034, Val Loss: 0.874439 +2025-07-04 09:14:22,965 - INFO - Epoch 136/150 - Train Loss: 1.152523, Val Loss: 0.873218 +2025-07-04 09:14:32,595 - INFO - Epoch 137/150 - Train Loss: 1.152435, Val Loss: 0.874103 +2025-07-04 09:14:42,199 - INFO - Epoch 138/150 - Train Loss: 1.152524, Val Loss: 0.874153 +2025-07-04 09:14:51,824 - INFO - Epoch 139/150 - Train Loss: 1.152752, Val Loss: 0.874754 +2025-07-04 09:15:01,444 - INFO - Epoch 140/150 - Train Loss: 1.152819, Val Loss: 0.874353 +2025-07-04 09:15:11,212 - INFO - Epoch 141/150 - Train Loss: 1.149516, Val Loss: 
0.873920 +2025-07-04 09:15:20,859 - INFO - Epoch 142/150 - Train Loss: 1.151008, Val Loss: 0.872659 +2025-07-04 09:15:30,492 - INFO - Epoch 143/150 - Train Loss: 1.140574, Val Loss: 0.873182 +2025-07-04 09:15:40,157 - INFO - Epoch 144/150 - Train Loss: 1.140706, Val Loss: 0.873095 +2025-07-04 09:15:49,799 - INFO - Epoch 145/150 - Train Loss: 1.153340, Val Loss: 0.873945 +2025-07-04 09:15:59,441 - INFO - Epoch 146/150 - Train Loss: 1.151823, Val Loss: 0.873226 +2025-07-04 09:16:09,108 - INFO - Epoch 147/150 - Train Loss: 1.153316, Val Loss: 0.873210 +2025-07-04 09:16:18,725 - INFO - Epoch 148/150 - Train Loss: 1.151713, Val Loss: 0.873454 +2025-07-04 09:16:29,039 - INFO - Epoch 149/150 - Train Loss: 1.152790, Val Loss: 0.873946 +2025-07-04 09:16:38,662 - INFO - Epoch 150/150 - Train Loss: 1.152922, Val Loss: 0.873481 +2025-07-04 09:16:38,811 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-07-04 09:16:38,820 - INFO - Testing the final model +2025-07-04 09:16:42,320 - INFO - Total MSE across all processes: 21.41421890258789 +2025-07-04 09:16:42,321 - INFO - mean value for all_targets: {tmp} +2025-07-04 09:16:42,323 - INFO - Test MSE: 0.892259, Test MAE: 0.572756, Max AE: 16.443457, Test R2: 0.2171 +2025-07-04 09:16:42,323 - INFO - Relative L2 Error: 0.884298, Relative L1 error: 0.878014 +2025-07-04 09:16:42,323 - INFO - Total inference time: 0.01s for 24 samples +2025-07-04 09:59:21,112 - INFO - args.exp_name : Train_Test +2025-07-04 09:59:21,114 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=6, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 09:59:21,114 - INFO - Starting training with 1 GPUs +2025-07-04 09:59:25,975 - INFO - 
Total trainable parameters: 1437705 +2025-07-04 09:59:26,039 - INFO - Data loaded: 2 training batches, 0 validation batches, 1 test batches +2025-07-04 09:59:26,042 - INFO - Staring training for 50 epochs +2025-07-04 10:26:58,897 - INFO - args.exp_name : Train_Test +2025-07-04 10:26:58,903 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=6, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 10:26:58,903 - INFO - Starting training with 1 GPUs +2025-07-04 10:27:03,799 - INFO - Total trainable parameters: 1437705 +2025-07-04 10:27:03,857 - INFO - Data loaded: 2 training batches, 0 validation batches, 1 test batches +2025-07-04 10:27:03,860 - INFO - Staring training for 50 epochs +2025-07-04 10:31:47,052 - INFO - args.exp_name : Train_Test +2025-07-04 10:31:47,056 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=6, epochs=50, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 10:31:47,057 - INFO - Starting training with 1 GPUs +2025-07-04 10:31:50,475 - INFO - Total trainable parameters: 1437705 +2025-07-04 10:31:50,539 - INFO - Data loaded: 3 training batches, 1 validation batches, 1 test batches +2025-07-04 10:31:50,539 - INFO - Staring training for 50 epochs +2025-07-04 10:31:59,013 - INFO - Epoch 1/50 - Train Loss: 1.283437, Val Loss: 1.146866 +2025-07-04 10:31:59,031 - INFO - New best model saved with Val Loss: 1.146866 +2025-07-04 10:32:05,120 - INFO - Epoch 
2/50 - Train Loss: 1.159811, Val Loss: 1.148012 +2025-07-04 10:32:11,178 - INFO - Epoch 3/50 - Train Loss: 1.015355, Val Loss: 1.148288 +2025-07-04 10:32:17,249 - INFO - Epoch 4/50 - Train Loss: 0.916734, Val Loss: 1.249089 +2025-07-04 10:32:23,285 - INFO - Epoch 5/50 - Train Loss: 0.839702, Val Loss: 1.442995 +2025-07-04 10:32:29,338 - INFO - Epoch 6/50 - Train Loss: 0.757819, Val Loss: 1.361055 +2025-07-04 10:32:35,401 - INFO - Epoch 7/50 - Train Loss: 0.661384, Val Loss: 1.379884 +2025-07-04 10:32:41,459 - INFO - Epoch 8/50 - Train Loss: 0.608690, Val Loss: 1.595568 +2025-07-04 10:32:47,524 - INFO - Epoch 9/50 - Train Loss: 0.560139, Val Loss: 1.838627 +2025-07-04 10:32:53,574 - INFO - Epoch 10/50 - Train Loss: 0.526667, Val Loss: 1.521763 +2025-07-04 10:32:59,809 - INFO - Epoch 11/50 - Train Loss: 0.494009, Val Loss: 1.437564 +2025-07-04 10:33:05,869 - INFO - Epoch 12/50 - Train Loss: 0.474216, Val Loss: 1.348367 +2025-07-04 10:33:11,941 - INFO - Epoch 13/50 - Train Loss: 0.457834, Val Loss: 1.089559 +2025-07-04 10:33:11,957 - INFO - New best model saved with Val Loss: 1.089559 +2025-07-04 10:33:18,018 - INFO - Epoch 14/50 - Train Loss: 0.455360, Val Loss: 0.832426 +2025-07-04 10:33:18,031 - INFO - New best model saved with Val Loss: 0.832426 +2025-07-04 10:33:24,074 - INFO - Epoch 15/50 - Train Loss: 0.447783, Val Loss: 0.654857 +2025-07-04 10:33:24,097 - INFO - New best model saved with Val Loss: 0.654857 +2025-07-04 10:33:30,177 - INFO - Epoch 16/50 - Train Loss: 0.446389, Val Loss: 0.548675 +2025-07-04 10:33:30,191 - INFO - New best model saved with Val Loss: 0.548675 +2025-07-04 10:33:36,251 - INFO - Epoch 17/50 - Train Loss: 0.440610, Val Loss: 0.495317 +2025-07-04 10:33:36,265 - INFO - New best model saved with Val Loss: 0.495317 +2025-07-04 10:33:42,334 - INFO - Epoch 18/50 - Train Loss: 0.438833, Val Loss: 0.464275 +2025-07-04 10:33:42,347 - INFO - New best model saved with Val Loss: 0.464275 +2025-07-04 10:33:48,433 - INFO - Epoch 19/50 - Train Loss: 
0.439969, Val Loss: 0.443737 +2025-07-04 10:33:48,447 - INFO - New best model saved with Val Loss: 0.443737 +2025-07-04 10:33:54,533 - INFO - Epoch 20/50 - Train Loss: 0.431883, Val Loss: 0.428062 +2025-07-04 10:33:54,547 - INFO - New best model saved with Val Loss: 0.428062 +2025-07-04 10:34:00,781 - INFO - Epoch 21/50 - Train Loss: 0.431155, Val Loss: 0.423547 +2025-07-04 10:34:00,796 - INFO - New best model saved with Val Loss: 0.423547 +2025-07-04 10:34:06,859 - INFO - Epoch 22/50 - Train Loss: 0.428144, Val Loss: 0.426146 +2025-07-04 10:34:12,893 - INFO - Epoch 23/50 - Train Loss: 0.425435, Val Loss: 0.427524 +2025-07-04 10:34:18,948 - INFO - Epoch 24/50 - Train Loss: 0.422210, Val Loss: 0.422279 +2025-07-04 10:34:18,962 - INFO - New best model saved with Val Loss: 0.422279 +2025-07-04 10:34:25,023 - INFO - Epoch 25/50 - Train Loss: 0.423424, Val Loss: 0.427988 +2025-07-04 10:34:31,079 - INFO - Epoch 26/50 - Train Loss: 0.416589, Val Loss: 0.438708 +2025-07-04 10:34:37,114 - INFO - Epoch 27/50 - Train Loss: 0.415543, Val Loss: 0.429039 +2025-07-04 10:34:43,202 - INFO - Epoch 28/50 - Train Loss: 0.413719, Val Loss: 0.420002 +2025-07-04 10:34:43,218 - INFO - New best model saved with Val Loss: 0.420002 +2025-07-04 10:34:49,247 - INFO - Epoch 29/50 - Train Loss: 0.410406, Val Loss: 0.413966 +2025-07-04 10:34:49,260 - INFO - New best model saved with Val Loss: 0.413966 +2025-07-04 10:34:55,327 - INFO - Epoch 30/50 - Train Loss: 0.409573, Val Loss: 0.411146 +2025-07-04 10:34:55,341 - INFO - New best model saved with Val Loss: 0.411146 +2025-07-04 10:35:01,513 - INFO - Epoch 31/50 - Train Loss: 0.407763, Val Loss: 0.405612 +2025-07-04 10:35:01,527 - INFO - New best model saved with Val Loss: 0.405612 +2025-07-04 10:35:07,573 - INFO - Epoch 32/50 - Train Loss: 0.407623, Val Loss: 0.406176 +2025-07-04 10:35:13,616 - INFO - Epoch 33/50 - Train Loss: 0.403604, Val Loss: 0.407962 +2025-07-04 10:35:19,690 - INFO - Epoch 34/50 - Train Loss: 0.399802, Val Loss: 0.401497 
+2025-07-04 10:35:19,714 - INFO - New best model saved with Val Loss: 0.401497 +2025-07-04 10:35:25,778 - INFO - Epoch 35/50 - Train Loss: 0.400421, Val Loss: 0.400646 +2025-07-04 10:35:25,792 - INFO - New best model saved with Val Loss: 0.400646 +2025-07-04 10:35:31,848 - INFO - Epoch 36/50 - Train Loss: 0.395775, Val Loss: 0.403946 +2025-07-04 10:35:37,907 - INFO - Epoch 37/50 - Train Loss: 0.395949, Val Loss: 0.401222 +2025-07-04 10:35:43,955 - INFO - Epoch 38/50 - Train Loss: 0.393724, Val Loss: 0.398120 +2025-07-04 10:35:43,969 - INFO - New best model saved with Val Loss: 0.398120 +2025-07-04 10:35:50,023 - INFO - Epoch 39/50 - Train Loss: 0.390039, Val Loss: 0.399025 +2025-07-04 10:35:56,058 - INFO - Epoch 40/50 - Train Loss: 0.391684, Val Loss: 0.390313 +2025-07-04 10:35:56,072 - INFO - New best model saved with Val Loss: 0.390313 +2025-07-04 10:36:02,256 - INFO - Epoch 41/50 - Train Loss: 0.386746, Val Loss: 0.386531 +2025-07-04 10:36:02,269 - INFO - New best model saved with Val Loss: 0.386531 +2025-07-04 10:36:08,328 - INFO - Epoch 42/50 - Train Loss: 0.386503, Val Loss: 0.384749 +2025-07-04 10:36:08,341 - INFO - New best model saved with Val Loss: 0.384749 +2025-07-04 10:36:14,404 - INFO - Epoch 43/50 - Train Loss: 0.385221, Val Loss: 0.382403 +2025-07-04 10:36:14,419 - INFO - New best model saved with Val Loss: 0.382403 +2025-07-04 10:36:20,488 - INFO - Epoch 44/50 - Train Loss: 0.379563, Val Loss: 0.379401 +2025-07-04 10:36:20,502 - INFO - New best model saved with Val Loss: 0.379401 +2025-07-04 10:36:26,565 - INFO - Epoch 45/50 - Train Loss: 0.381152, Val Loss: 0.383046 +2025-07-04 10:36:32,639 - INFO - Epoch 46/50 - Train Loss: 0.377841, Val Loss: 0.374077 +2025-07-04 10:36:32,654 - INFO - New best model saved with Val Loss: 0.374077 +2025-07-04 10:36:38,710 - INFO - Epoch 47/50 - Train Loss: 0.376989, Val Loss: 0.374267 +2025-07-04 10:36:44,771 - INFO - Epoch 48/50 - Train Loss: 0.372625, Val Loss: 0.375827 +2025-07-04 10:36:50,824 - INFO - Epoch 
49/50 - Train Loss: 0.373013, Val Loss: 0.377917 +2025-07-04 10:36:56,862 - INFO - Epoch 50/50 - Train Loss: 0.370192, Val Loss: 0.374004 +2025-07-04 10:36:56,876 - INFO - New best model saved with Val Loss: 0.374004 +2025-07-04 10:36:57,017 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-07-04 10:36:57,023 - INFO - Testing the final model +2025-07-04 10:36:59,752 - INFO - Total MSE across all processes: 2.023336172103882 +2025-07-04 10:36:59,753 - INFO - mean value for all_targets: {tmp} +2025-07-04 10:36:59,753 - INFO - Test MSE: 0.337223, Test MAE: 0.317886, Max AE: 7.466796, Test R2: 0.7012 +2025-07-04 10:36:59,753 - INFO - Relative L2 Error: 0.546824, Relative L1 error: 0.485240 +2025-07-04 10:36:59,753 - INFO - Total inference time: 0.00s for 6 samples +2025-07-04 10:36:59,755 - INFO - Testing the best model +2025-07-04 10:37:02,454 - INFO - Total MSE across all processes: 2.023336172103882 +2025-07-04 10:37:02,454 - INFO - mean value for all_targets: {tmp} +2025-07-04 10:37:02,454 - INFO - Test MSE: 0.337223, Test MAE: 0.317886, Max AE: 7.466796, Test R2: 0.7012 +2025-07-04 10:37:02,455 - INFO - Relative L2 Error: 0.546824, Relative L1 error: 0.485240 +2025-07-04 10:37:02,455 - INFO - Total inference time: 0.00s for 6 samples +2025-07-04 10:57:13,577 - INFO - args.exp_name : Train_Test +2025-07-04 10:57:13,578 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 10:57:13,578 - INFO - Starting training with 1 GPUs +2025-07-04 10:57:16,876 - INFO - Total trainable parameters: 1437705 +2025-07-04 10:57:17,065 - INFO 
- Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-04 10:57:17,068 - INFO - Staring training for 150 epochs +2025-07-04 10:57:37,225 - INFO - Epoch 1/150 - Train Loss: 0.765392, Val Loss: 1.117532 +2025-07-04 10:57:37,258 - INFO - New best model saved with Val Loss: 1.117532 +2025-07-04 10:57:54,967 - INFO - Epoch 2/150 - Train Loss: 0.425968, Val Loss: 0.480214 +2025-07-04 10:57:54,982 - INFO - New best model saved with Val Loss: 0.480214 +2025-07-04 10:58:12,671 - INFO - Epoch 3/150 - Train Loss: 0.359811, Val Loss: 0.444877 +2025-07-04 10:58:12,686 - INFO - New best model saved with Val Loss: 0.444877 +2025-07-04 10:58:30,381 - INFO - Epoch 4/150 - Train Loss: 0.312085, Val Loss: 0.590544 +2025-07-04 10:58:48,122 - INFO - Epoch 5/150 - Train Loss: 0.284683, Val Loss: 0.833210 +2025-07-04 10:59:05,840 - INFO - Epoch 6/150 - Train Loss: 0.266364, Val Loss: 1.044873 +2025-07-04 10:59:23,565 - INFO - Epoch 7/150 - Train Loss: 0.244517, Val Loss: 0.458772 +2025-07-04 10:59:41,802 - INFO - Epoch 8/150 - Train Loss: 0.227804, Val Loss: 1.118219 +2025-07-04 10:59:59,532 - INFO - Epoch 9/150 - Train Loss: 0.222722, Val Loss: 0.292218 +2025-07-04 10:59:59,547 - INFO - New best model saved with Val Loss: 0.292218 +2025-07-04 11:00:17,280 - INFO - Epoch 10/150 - Train Loss: 0.213346, Val Loss: 0.264306 +2025-07-04 11:00:17,295 - INFO - New best model saved with Val Loss: 0.264306 +2025-07-04 11:00:35,156 - INFO - Epoch 11/150 - Train Loss: 0.198541, Val Loss: 1.010461 +2025-07-04 11:00:52,876 - INFO - Epoch 12/150 - Train Loss: 0.188102, Val Loss: 0.402349 +2025-07-04 11:01:10,606 - INFO - Epoch 13/150 - Train Loss: 0.190205, Val Loss: 0.351225 +2025-07-04 11:01:28,360 - INFO - Epoch 14/150 - Train Loss: 0.188799, Val Loss: 0.817092 +2025-07-04 11:01:46,206 - INFO - Epoch 15/150 - Train Loss: 0.181614, Val Loss: 0.227752 +2025-07-04 11:01:46,221 - INFO - New best model saved with Val Loss: 0.227752 +2025-07-04 11:02:05,107 - INFO - Epoch 
16/150 - Train Loss: 0.177835, Val Loss: 0.312880 +2025-07-04 11:02:23,725 - INFO - Epoch 17/150 - Train Loss: 0.174542, Val Loss: 0.204217 +2025-07-04 11:02:23,740 - INFO - New best model saved with Val Loss: 0.204217 +2025-07-04 11:02:41,459 - INFO - Epoch 18/150 - Train Loss: 0.171377, Val Loss: 0.178336 +2025-07-04 11:02:41,473 - INFO - New best model saved with Val Loss: 0.178336 +2025-07-04 11:02:59,214 - INFO - Epoch 19/150 - Train Loss: 0.167564, Val Loss: 0.218340 +2025-07-04 11:03:16,955 - INFO - Epoch 20/150 - Train Loss: 0.167842, Val Loss: 0.332284 +2025-07-04 11:03:34,826 - INFO - Epoch 21/150 - Train Loss: 0.163059, Val Loss: 0.209619 +2025-07-04 11:03:52,539 - INFO - Epoch 22/150 - Train Loss: 0.160918, Val Loss: 0.248925 +2025-07-04 11:04:10,271 - INFO - Epoch 23/150 - Train Loss: 0.157175, Val Loss: 0.624631 +2025-07-04 11:04:28,006 - INFO - Epoch 24/150 - Train Loss: 0.156025, Val Loss: 0.170956 +2025-07-04 11:04:28,021 - INFO - New best model saved with Val Loss: 0.170956 +2025-07-04 11:04:45,800 - INFO - Epoch 25/150 - Train Loss: 0.153771, Val Loss: 0.321865 +2025-07-04 11:05:03,538 - INFO - Epoch 26/150 - Train Loss: 0.148996, Val Loss: 0.149375 +2025-07-04 11:05:03,553 - INFO - New best model saved with Val Loss: 0.149375 +2025-07-04 11:05:21,299 - INFO - Epoch 27/150 - Train Loss: 0.149616, Val Loss: 0.178216 +2025-07-04 11:05:39,054 - INFO - Epoch 28/150 - Train Loss: 0.154057, Val Loss: 0.161862 +2025-07-04 11:05:56,757 - INFO - Epoch 29/150 - Train Loss: 0.146227, Val Loss: 0.175928 +2025-07-04 11:06:14,447 - INFO - Epoch 30/150 - Train Loss: 0.146135, Val Loss: 0.377274 +2025-07-04 11:06:32,248 - INFO - Epoch 31/150 - Train Loss: 0.145402, Val Loss: 0.213034 +2025-07-04 11:06:49,915 - INFO - Epoch 32/150 - Train Loss: 0.142771, Val Loss: 0.151741 +2025-07-04 11:07:07,605 - INFO - Epoch 33/150 - Train Loss: 0.145088, Val Loss: 0.146101 +2025-07-04 11:07:07,772 - INFO - New best model saved with Val Loss: 0.146101 +2025-07-04 11:07:25,485 
- INFO - Epoch 34/150 - Train Loss: 0.139568, Val Loss: 0.273251 +2025-07-04 11:07:43,166 - INFO - Epoch 35/150 - Train Loss: 0.140397, Val Loss: 0.148392 +2025-07-04 11:08:00,841 - INFO - Epoch 36/150 - Train Loss: 0.136741, Val Loss: 0.150139 +2025-07-04 11:08:18,529 - INFO - Epoch 37/150 - Train Loss: 0.135322, Val Loss: 0.148857 +2025-07-04 11:08:36,229 - INFO - Epoch 38/150 - Train Loss: 0.132527, Val Loss: 0.130883 +2025-07-04 11:08:36,244 - INFO - New best model saved with Val Loss: 0.130883 +2025-07-04 11:08:53,921 - INFO - Epoch 39/150 - Train Loss: 0.135555, Val Loss: 0.149993 +2025-07-04 11:09:11,628 - INFO - Epoch 40/150 - Train Loss: 0.135385, Val Loss: 0.164116 +2025-07-04 11:09:29,416 - INFO - Epoch 41/150 - Train Loss: 0.132284, Val Loss: 0.146587 +2025-07-04 11:09:47,072 - INFO - Epoch 42/150 - Train Loss: 0.133476, Val Loss: 0.166374 +2025-07-04 11:10:04,740 - INFO - Epoch 43/150 - Train Loss: 0.132089, Val Loss: 0.138480 +2025-07-04 11:10:22,422 - INFO - Epoch 44/150 - Train Loss: 0.131016, Val Loss: 0.134061 +2025-07-04 11:10:40,137 - INFO - Epoch 45/150 - Train Loss: 0.131406, Val Loss: 0.128516 +2025-07-04 11:10:40,153 - INFO - New best model saved with Val Loss: 0.128516 +2025-07-04 11:10:57,844 - INFO - Epoch 46/150 - Train Loss: 0.127681, Val Loss: 0.202615 +2025-07-04 11:11:15,682 - INFO - Epoch 47/150 - Train Loss: 0.126269, Val Loss: 0.136413 +2025-07-04 11:11:33,387 - INFO - Epoch 48/150 - Train Loss: 0.128555, Val Loss: 0.211257 +2025-07-04 11:11:51,064 - INFO - Epoch 49/150 - Train Loss: 0.128324, Val Loss: 0.134678 +2025-07-04 11:12:09,127 - INFO - Epoch 50/150 - Train Loss: 0.125648, Val Loss: 0.164913 +2025-07-04 11:12:12,302 - INFO - args.exp_name : Train_Test +2025-07-04 11:12:12,305 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', 
cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 11:12:12,305 - INFO - Starting training with 1 GPUs +2025-07-04 11:12:15,606 - INFO - Total trainable parameters: 1437705 +2025-07-04 11:12:15,739 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-04 11:12:15,740 - INFO - Staring training for 150 epochs +2025-07-04 11:12:19,921 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:12:19,921 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:12:27,217 - INFO - Epoch 51/150 - Train Loss: 0.124183, Val Loss: 0.121055 +2025-07-04 11:12:27,249 - INFO - New best model saved with Val Loss: 0.121055 +2025-07-04 11:12:44,946 - INFO - Epoch 52/150 - Train Loss: 0.125603, Val Loss: 0.207307 +2025-07-04 11:13:02,677 - INFO - Epoch 53/150 - Train Loss: 0.122962, Val Loss: 0.134787 +2025-07-04 11:13:20,428 - INFO - Epoch 54/150 - Train Loss: 0.122586, Val Loss: 0.152079 +2025-07-04 11:13:38,151 - INFO - Epoch 55/150 - Train Loss: 0.122021, Val Loss: 0.123622 +2025-07-04 11:13:55,889 - INFO - Epoch 56/150 - Train Loss: 0.121857, Val Loss: 0.212121 +2025-07-04 11:14:13,639 - INFO - Epoch 57/150 - Train Loss: 0.123328, Val Loss: 0.150480 +2025-07-04 11:14:37,412 - INFO - Epoch 58/150 - Train Loss: 0.122286, Val Loss: 0.167857 +2025-07-04 11:14:55,379 - INFO - Epoch 59/150 - Train Loss: 0.121922, Val Loss: 0.140145 +2025-07-04 11:15:13,186 - INFO - Epoch 60/150 - Train Loss: 0.117993, Val Loss: 0.169449 +2025-07-04 11:15:31,080 - INFO - Epoch 61/150 - Train Loss: 0.120024, Val Loss: 0.118396 +2025-07-04 11:15:31,111 - INFO - New best model saved with Val Loss: 0.118396 +2025-07-04 11:15:48,911 - INFO - Epoch 62/150 - Train Loss: 0.120212, Val Loss: 0.193534 +2025-07-04 11:16:06,679 - INFO - Epoch 63/150 - Train Loss: 
0.121789, Val Loss: 0.137048 +2025-07-04 11:16:24,498 - INFO - Epoch 64/150 - Train Loss: 0.119106, Val Loss: 0.115732 +2025-07-04 11:16:24,513 - INFO - New best model saved with Val Loss: 0.115732 +2025-07-04 11:16:42,283 - INFO - Epoch 65/150 - Train Loss: 0.116286, Val Loss: 0.150539 +2025-07-04 11:17:00,073 - INFO - Epoch 66/150 - Train Loss: 0.116608, Val Loss: 0.204416 +2025-07-04 11:17:17,859 - INFO - Epoch 67/150 - Train Loss: 0.116302, Val Loss: 0.160922 +2025-07-04 11:17:35,638 - INFO - Epoch 68/150 - Train Loss: 0.120537, Val Loss: 0.140712 +2025-07-04 11:17:53,397 - INFO - Epoch 69/150 - Train Loss: 0.116008, Val Loss: 0.133827 +2025-07-04 11:18:11,186 - INFO - Epoch 70/150 - Train Loss: 0.117500, Val Loss: 0.125367 +2025-07-04 11:18:29,100 - INFO - Epoch 71/150 - Train Loss: 0.115497, Val Loss: 0.111867 +2025-07-04 11:18:29,117 - INFO - New best model saved with Val Loss: 0.111867 +2025-07-04 11:18:46,862 - INFO - Epoch 72/150 - Train Loss: 0.113848, Val Loss: 0.149078 +2025-07-04 11:19:04,584 - INFO - Epoch 73/150 - Train Loss: 0.115463, Val Loss: 0.112712 +2025-07-04 11:19:22,329 - INFO - Epoch 74/150 - Train Loss: 0.112697, Val Loss: 0.117975 +2025-07-04 11:19:39,642 - INFO - args.exp_name : Train_Test +2025-07-04 11:19:39,642 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 11:19:39,643 - INFO - Starting training with 1 GPUs +2025-07-04 11:19:40,162 - INFO - Epoch 75/150 - Train Loss: 0.116695, Val Loss: 0.156708 +2025-07-04 11:19:43,809 - INFO - Total trainable parameters: 1437705 +2025-07-04 11:19:43,945 - INFO - Data 
loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-04 11:19:43,948 - INFO - Staring training for 150 epochs +2025-07-04 11:19:58,452 - INFO - Epoch 76/150 - Train Loss: 0.115100, Val Loss: 0.130494 +2025-07-04 11:20:16,182 - INFO - Epoch 77/150 - Train Loss: 0.112540, Val Loss: 0.115179 +2025-07-04 11:20:33,917 - INFO - Epoch 78/150 - Train Loss: 0.111732, Val Loss: 0.116926 +2025-07-04 11:20:52,823 - INFO - Epoch 79/150 - Train Loss: 0.110727, Val Loss: 0.126542 +2025-07-04 11:20:54,270 - INFO - args.exp_name : Train_Test +2025-07-04 11:20:54,271 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 11:20:54,271 - INFO - Starting training with 1 GPUs +2025-07-04 11:20:58,251 - INFO - Total trainable parameters: 1437705 +2025-07-04 11:20:58,314 - INFO - Data loaded: 3 training batches, 1 validation batches, 1 test batches +2025-07-04 11:20:58,314 - INFO - Staring training for 150 epochs +2025-07-04 11:21:10,629 - INFO - Epoch 80/150 - Train Loss: 0.111588, Val Loss: 0.117037 +2025-07-04 11:21:28,458 - INFO - Epoch 81/150 - Train Loss: 0.111193, Val Loss: 0.166504 +2025-07-04 11:21:46,188 - INFO - Epoch 82/150 - Train Loss: 0.111560, Val Loss: 0.212419 +2025-07-04 11:22:03,922 - INFO - Epoch 83/150 - Train Loss: 0.103109, Val Loss: 0.092613 +2025-07-04 11:22:03,939 - INFO - New best model saved with Val Loss: 0.092613 +2025-07-04 11:22:13,605 - INFO - args.exp_name : Train_Test +2025-07-04 11:22:13,605 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', 
subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 11:22:13,605 - INFO - Starting training with 1 GPUs +2025-07-04 11:22:17,612 - INFO - Total trainable parameters: 1437705 +2025-07-04 11:22:17,640 - INFO - Data loaded: 3 training batches, 1 validation batches, 1 test batches +2025-07-04 11:22:17,643 - INFO - Staring training for 150 epochs +2025-07-04 11:22:21,988 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:22:21,989 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:22:23,643 - INFO - Epoch 84/150 - Train Loss: 0.098936, Val Loss: 0.090915 +2025-07-04 11:22:23,660 - INFO - New best model saved with Val Loss: 0.090915 +2025-07-04 11:22:41,391 - INFO - Epoch 85/150 - Train Loss: 0.097921, Val Loss: 0.090856 +2025-07-04 11:22:41,407 - INFO - New best model saved with Val Loss: 0.090856 +2025-07-04 11:22:59,119 - INFO - Epoch 86/150 - Train Loss: 0.097712, Val Loss: 0.091261 +2025-07-04 11:23:16,809 - INFO - Epoch 87/150 - Train Loss: 0.097985, Val Loss: 0.089322 +2025-07-04 11:23:16,824 - INFO - New best model saved with Val Loss: 0.089322 +2025-07-04 11:23:34,524 - INFO - Epoch 88/150 - Train Loss: 0.097777, Val Loss: 0.090429 +2025-07-04 11:23:52,215 - INFO - Epoch 89/150 - Train Loss: 0.097160, Val Loss: 0.090603 +2025-07-04 11:24:09,894 - INFO - Epoch 90/150 - Train Loss: 0.097306, Val Loss: 0.090642 +2025-07-04 11:24:27,724 - INFO - Epoch 91/150 - Train Loss: 0.096543, Val Loss: 0.092955 +2025-07-04 11:24:45,444 - INFO - Epoch 92/150 - Train Loss: 0.097235, Val Loss: 0.090075 +2025-07-04 11:25:03,135 - INFO - Epoch 93/150 - Train Loss: 0.096742, Val Loss: 0.090382 +2025-07-04 11:25:20,910 - INFO - Epoch 94/150 - Train Loss: 0.095817, Val Loss: 0.090097 +2025-07-04 
11:25:26,480 - INFO - args.exp_name : Train_Test +2025-07-04 11:25:26,484 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 11:25:26,485 - INFO - Starting training with 1 GPUs +2025-07-04 11:25:30,407 - INFO - Total trainable parameters: 1437705 +2025-07-04 11:25:30,433 - INFO - Data loaded: 3 training batches, 1 validation batches, 1 test batches +2025-07-04 11:25:30,437 - INFO - Staring training for 150 epochs +2025-07-04 11:25:34,711 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:25:34,717 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:25:34,718 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:25:34,718 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:34,718 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:34,742 - INFO - After Normalization*************************************** +2025-07-04 11:25:34,742 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:34,742 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:35,580 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:25:35,580 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:25:35,580 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:25:35,580 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:35,580 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
11:25:35,580 - INFO - After Normalization*************************************** +2025-07-04 11:25:35,580 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:35,581 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:35,875 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:25:35,875 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:25:35,876 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:25:35,876 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:35,876 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:35,876 - INFO - After Normalization*************************************** +2025-07-04 11:25:35,876 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:35,876 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:39,114 - INFO - Epoch 1/150 - Train Loss: 1.280600, Val Loss: 1.146895 +2025-07-04 11:25:39,132 - INFO - New best model saved with Val Loss: 1.146895 +2025-07-04 11:25:39,458 - INFO - Epoch 95/150 - Train Loss: 0.096246, Val Loss: 0.089551 +2025-07-04 11:25:41,416 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:25:41,429 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:25:41,429 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:25:41,429 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:41,429 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:41,429 - INFO - After Normalization*************************************** +2025-07-04 11:25:41,429 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:41,429 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:25:41,742 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:25:41,742 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:25:41,742 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:25:41,742 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:41,742 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:41,742 - INFO - After Normalization*************************************** +2025-07-04 11:25:41,743 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:41,743 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:42,032 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:25:42,032 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:25:42,032 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:25:42,032 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:42,032 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:42,032 - INFO - After Normalization*************************************** +2025-07-04 11:25:42,032 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:42,032 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:45,246 - INFO - Epoch 2/150 - Train Loss: 1.144802, Val Loss: 1.149067 +2025-07-04 11:25:47,510 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:25:47,523 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:25:47,523 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:25:47,523 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:47,523 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 11:25:47,523 - INFO - After Normalization*************************************** +2025-07-04 11:25:47,523 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:47,523 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:47,828 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:25:47,828 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:25:47,829 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:25:47,829 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:47,829 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:47,829 - INFO - After Normalization*************************************** +2025-07-04 11:25:47,829 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:47,829 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:48,117 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:25:48,118 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:25:48,118 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:25:48,118 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:48,118 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:48,118 - INFO - After Normalization*************************************** +2025-07-04 11:25:48,118 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:48,118 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:51,309 - INFO - Epoch 3/150 - Train Loss: 0.975273, Val Loss: 1.146720 +2025-07-04 11:25:51,324 - INFO - New best model saved with Val Loss: 1.146720 +2025-07-04 11:25:53,575 - 
INFO - before .to(local_rank)*************************************** +2025-07-04 11:25:53,588 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:25:53,589 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:25:53,589 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:53,589 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:53,589 - INFO - After Normalization*************************************** +2025-07-04 11:25:53,589 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:53,589 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:53,900 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:25:53,900 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:25:53,901 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:25:53,901 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:53,901 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:53,901 - INFO - After Normalization*************************************** +2025-07-04 11:25:53,901 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:53,901 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:54,190 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:25:54,190 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:25:54,190 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:25:54,190 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:54,190 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:54,190 - INFO - After Normalization*************************************** 
+2025-07-04 11:25:54,191 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:54,191 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:57,221 - INFO - Epoch 96/150 - Train Loss: 0.095710, Val Loss: 0.088552 +2025-07-04 11:25:57,240 - INFO - New best model saved with Val Loss: 0.088552 +2025-07-04 11:25:57,415 - INFO - Epoch 4/150 - Train Loss: 0.867832, Val Loss: 1.204947 +2025-07-04 11:25:59,659 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:25:59,672 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:25:59,672 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:25:59,673 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:59,673 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:59,673 - INFO - After Normalization*************************************** +2025-07-04 11:25:59,673 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:59,673 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:59,990 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:25:59,990 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:25:59,990 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:25:59,990 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:59,990 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:59,990 - INFO - After Normalization*************************************** +2025-07-04 11:25:59,990 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:25:59,990 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:00,279 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:26:00,279 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:00,279 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:00,279 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:00,279 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:00,279 - INFO - After Normalization*************************************** +2025-07-04 11:26:00,279 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:00,279 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:03,503 - INFO - Epoch 5/150 - Train Loss: 0.776425, Val Loss: 1.353248 +2025-07-04 11:26:05,751 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:05,764 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:05,765 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:05,765 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:05,765 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:05,765 - INFO - After Normalization*************************************** +2025-07-04 11:26:05,765 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:05,765 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:06,075 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:06,075 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:06,075 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:06,075 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:06,076 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
11:26:06,076 - INFO - After Normalization*************************************** +2025-07-04 11:26:06,076 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:06,076 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:06,364 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:06,364 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:06,364 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:06,364 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:06,364 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:06,364 - INFO - After Normalization*************************************** +2025-07-04 11:26:06,365 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:06,365 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:09,552 - INFO - Epoch 6/150 - Train Loss: 0.683154, Val Loss: 1.512758 +2025-07-04 11:26:11,793 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:11,806 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:11,807 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:11,807 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:11,807 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:11,807 - INFO - After Normalization*************************************** +2025-07-04 11:26:11,807 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:11,807 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:12,116 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:12,116 - INFO - (device(type='cpu'), 
device(type='cpu')) +2025-07-04 11:26:12,116 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:12,116 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:12,116 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:12,117 - INFO - After Normalization*************************************** +2025-07-04 11:26:12,117 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:12,117 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:12,406 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:12,406 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:12,406 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:12,406 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:12,406 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:12,407 - INFO - After Normalization*************************************** +2025-07-04 11:26:12,407 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:12,407 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:15,027 - INFO - Epoch 97/150 - Train Loss: 0.096374, Val Loss: 0.089566 +2025-07-04 11:26:15,599 - INFO - Epoch 7/150 - Train Loss: 0.610629, Val Loss: 1.877720 +2025-07-04 11:26:17,852 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:17,865 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:17,865 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:17,865 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:17,865 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:17,866 - INFO - 
After Normalization*************************************** +2025-07-04 11:26:17,866 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:17,866 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:18,171 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:18,171 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:18,171 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:18,171 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:18,171 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:18,171 - INFO - After Normalization*************************************** +2025-07-04 11:26:18,172 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:18,172 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:18,461 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:18,461 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:18,461 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:18,461 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:18,461 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:18,461 - INFO - After Normalization*************************************** +2025-07-04 11:26:18,461 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:18,461 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:21,682 - INFO - Epoch 8/150 - Train Loss: 0.574927, Val Loss: 2.574531 +2025-07-04 11:26:23,956 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:23,969 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 
11:26:23,969 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:23,970 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:23,970 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:23,970 - INFO - After Normalization*************************************** +2025-07-04 11:26:23,970 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:23,970 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:24,290 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:24,290 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:24,290 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:24,290 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:24,290 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:24,290 - INFO - After Normalization*************************************** +2025-07-04 11:26:24,291 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:24,291 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:24,580 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:24,580 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:24,580 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:24,580 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:24,580 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:24,581 - INFO - After Normalization*************************************** +2025-07-04 11:26:24,581 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:24,581 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 11:26:27,811 - INFO - Epoch 9/150 - Train Loss: 0.542028, Val Loss: 1.789927 +2025-07-04 11:26:30,083 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:30,097 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:30,097 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:30,097 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:30,097 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:30,097 - INFO - After Normalization*************************************** +2025-07-04 11:26:30,097 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:30,097 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:30,407 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:30,407 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:30,407 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:30,407 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:30,408 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:30,408 - INFO - After Normalization*************************************** +2025-07-04 11:26:30,408 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:30,408 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:30,696 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:30,696 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:30,697 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:30,697 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:30,697 - INFO 
- (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:30,697 - INFO - After Normalization*************************************** +2025-07-04 11:26:30,697 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:30,697 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:32,834 - INFO - Epoch 98/150 - Train Loss: 0.096752, Val Loss: 0.088469 +2025-07-04 11:26:32,849 - INFO - New best model saved with Val Loss: 0.088469 +2025-07-04 11:26:33,915 - INFO - Epoch 10/150 - Train Loss: 0.521374, Val Loss: 1.558796 +2025-07-04 11:26:36,326 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:36,339 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:36,339 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:36,339 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:36,339 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:36,339 - INFO - After Normalization*************************************** +2025-07-04 11:26:36,339 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:36,339 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:36,645 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:36,645 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:36,645 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:36,645 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:36,645 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:36,645 - INFO - After Normalization*************************************** +2025-07-04 11:26:36,645 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
11:26:36,645 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:36,934 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:36,934 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:36,934 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:36,934 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:36,934 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:36,934 - INFO - After Normalization*************************************** +2025-07-04 11:26:36,934 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:36,934 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:40,117 - INFO - Epoch 11/150 - Train Loss: 0.496060, Val Loss: 1.854632 +2025-07-04 11:26:42,376 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:42,389 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:42,389 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:42,390 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:42,390 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:42,390 - INFO - After Normalization*************************************** +2025-07-04 11:26:42,390 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:42,390 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:42,691 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:42,691 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:42,692 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:42,692 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 11:26:42,692 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:42,692 - INFO - After Normalization*************************************** +2025-07-04 11:26:42,692 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:42,692 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:42,980 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:42,980 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:42,981 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:42,981 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:42,981 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:42,981 - INFO - After Normalization*************************************** +2025-07-04 11:26:42,981 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:42,981 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:46,176 - INFO - Epoch 12/150 - Train Loss: 0.475875, Val Loss: 1.684819 +2025-07-04 11:26:48,436 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:48,449 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:48,450 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:48,451 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:48,451 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:48,451 - INFO - After Normalization*************************************** +2025-07-04 11:26:48,451 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:48,451 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
11:26:48,755 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:48,755 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:48,755 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:48,755 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:48,755 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:48,755 - INFO - After Normalization*************************************** +2025-07-04 11:26:48,756 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:48,756 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:49,044 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:49,045 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:49,045 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:49,045 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:49,045 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:49,045 - INFO - After Normalization*************************************** +2025-07-04 11:26:49,045 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:49,045 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:50,565 - INFO - Epoch 99/150 - Train Loss: 0.093261, Val Loss: 0.089550 +2025-07-04 11:26:52,248 - INFO - Epoch 13/150 - Train Loss: 0.461486, Val Loss: 0.891453 +2025-07-04 11:26:52,270 - INFO - New best model saved with Val Loss: 0.891453 +2025-07-04 11:26:54,525 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:54,540 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:54,540 - INFO - After .to(local_rank)*************************************** 
+2025-07-04 11:26:54,541 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:54,541 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:54,541 - INFO - After Normalization*************************************** +2025-07-04 11:26:54,541 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:54,541 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:54,853 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:54,854 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:54,854 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:54,854 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:54,854 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:54,854 - INFO - After Normalization*************************************** +2025-07-04 11:26:54,854 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:54,854 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:55,142 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:26:55,143 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:26:55,143 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:26:55,143 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:55,143 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:55,144 - INFO - After Normalization*************************************** +2025-07-04 11:26:55,144 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:55,144 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:26:58,351 - INFO - Epoch 14/150 - 
Train Loss: 0.446934, Val Loss: 0.620117 +2025-07-04 11:26:58,365 - INFO - New best model saved with Val Loss: 0.620117 +2025-07-04 11:27:00,593 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:00,605 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:00,606 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:00,606 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:00,606 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:00,606 - INFO - After Normalization*************************************** +2025-07-04 11:27:00,606 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:00,606 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:00,914 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:00,914 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:00,915 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:00,915 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:00,915 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:00,915 - INFO - After Normalization*************************************** +2025-07-04 11:27:00,915 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:00,915 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:01,203 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:01,203 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:01,203 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:01,203 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:01,204 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:01,204 - INFO - After Normalization*************************************** +2025-07-04 11:27:01,204 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:01,204 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:04,429 - INFO - Epoch 15/150 - Train Loss: 0.428600, Val Loss: 0.572159 +2025-07-04 11:27:04,443 - INFO - New best model saved with Val Loss: 0.572159 +2025-07-04 11:27:06,681 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:06,694 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:06,694 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:06,695 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:06,695 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:06,695 - INFO - After Normalization*************************************** +2025-07-04 11:27:06,695 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:06,695 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:06,997 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:06,997 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:06,997 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:06,997 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:06,997 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:06,997 - INFO - After Normalization*************************************** +2025-07-04 11:27:06,998 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:06,998 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) 
+2025-07-04 11:27:07,286 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:07,286 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:07,286 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:07,287 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:07,287 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:07,287 - INFO - After Normalization*************************************** +2025-07-04 11:27:07,287 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:07,287 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:08,336 - INFO - Epoch 100/150 - Train Loss: 0.095612, Val Loss: 0.096346 +2025-07-04 11:27:10,489 - INFO - Epoch 16/150 - Train Loss: 0.421643, Val Loss: 0.504932 +2025-07-04 11:27:10,503 - INFO - New best model saved with Val Loss: 0.504932 +2025-07-04 11:27:12,757 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:12,770 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:12,770 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:12,770 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:12,770 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:12,771 - INFO - After Normalization*************************************** +2025-07-04 11:27:12,771 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:12,771 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:13,085 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:13,085 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:13,085 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 11:27:13,086 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:13,086 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:13,086 - INFO - After Normalization*************************************** +2025-07-04 11:27:13,086 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:13,086 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:13,376 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:13,377 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:13,377 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:13,377 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:13,377 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:13,377 - INFO - After Normalization*************************************** +2025-07-04 11:27:13,377 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:13,377 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:16,564 - INFO - Epoch 17/150 - Train Loss: 0.403576, Val Loss: 0.716280 +2025-07-04 11:27:18,826 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:18,838 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:18,839 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:18,839 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:18,839 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:18,839 - INFO - After Normalization*************************************** +2025-07-04 11:27:18,839 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:27:18,840 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:19,154 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:19,154 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:19,155 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:19,155 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:19,155 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:19,155 - INFO - After Normalization*************************************** +2025-07-04 11:27:19,155 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:19,155 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:19,446 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:19,446 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:19,446 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:19,446 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:19,446 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:19,447 - INFO - After Normalization*************************************** +2025-07-04 11:27:19,447 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:19,447 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:22,660 - INFO - Epoch 18/150 - Train Loss: 0.402675, Val Loss: 0.490075 +2025-07-04 11:27:22,674 - INFO - New best model saved with Val Loss: 0.490075 +2025-07-04 11:27:24,924 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:24,937 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:24,937 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 11:27:24,937 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:24,937 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:24,937 - INFO - After Normalization*************************************** +2025-07-04 11:27:24,937 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:24,937 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:25,244 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:25,244 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:25,244 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:25,244 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:25,244 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:25,245 - INFO - After Normalization*************************************** +2025-07-04 11:27:25,245 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:25,245 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:25,536 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:25,536 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:25,536 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:25,536 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:25,536 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:25,536 - INFO - After Normalization*************************************** +2025-07-04 11:27:25,536 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:25,536 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:27:26,197 - INFO - Epoch 101/150 - Train Loss: 0.096476, Val Loss: 0.128738 +2025-07-04 11:27:28,753 - INFO - Epoch 19/150 - Train Loss: 0.398048, Val Loss: 0.483450 +2025-07-04 11:27:28,768 - INFO - New best model saved with Val Loss: 0.483450 +2025-07-04 11:27:31,017 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:31,030 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:31,030 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:31,030 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:31,030 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:31,030 - INFO - After Normalization*************************************** +2025-07-04 11:27:31,031 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:31,031 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:31,331 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:31,331 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:31,331 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:31,331 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:31,331 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:31,331 - INFO - After Normalization*************************************** +2025-07-04 11:27:31,331 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:31,331 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:31,622 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:31,622 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:31,623 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 11:27:31,623 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:31,623 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:31,623 - INFO - After Normalization*************************************** +2025-07-04 11:27:31,623 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:31,623 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:34,821 - INFO - Epoch 20/150 - Train Loss: 0.380748, Val Loss: 0.511743 +2025-07-04 11:27:37,191 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:37,203 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:37,204 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:37,204 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:37,204 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:37,204 - INFO - After Normalization*************************************** +2025-07-04 11:27:37,204 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:37,204 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:37,512 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:37,512 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:37,512 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:37,513 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:37,513 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:37,513 - INFO - After Normalization*************************************** +2025-07-04 11:27:37,513 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:27:37,513 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:37,801 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:37,801 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:37,801 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:37,802 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:37,802 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:37,802 - INFO - After Normalization*************************************** +2025-07-04 11:27:37,802 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:37,802 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:41,009 - INFO - Epoch 21/150 - Train Loss: 0.374902, Val Loss: 1.120439 +2025-07-04 11:27:43,240 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:43,253 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:43,254 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:43,254 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:43,254 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:43,254 - INFO - After Normalization*************************************** +2025-07-04 11:27:43,254 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:43,254 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:43,560 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:43,560 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:43,560 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:43,560 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 11:27:43,560 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:43,560 - INFO - After Normalization*************************************** +2025-07-04 11:27:43,561 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:43,561 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:43,850 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:43,850 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:43,850 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:43,850 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:43,850 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:43,850 - INFO - After Normalization*************************************** +2025-07-04 11:27:43,850 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:43,850 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:43,960 - INFO - Epoch 102/150 - Train Loss: 0.096039, Val Loss: 0.090330 +2025-07-04 11:27:47,040 - INFO - Epoch 22/150 - Train Loss: 0.366255, Val Loss: 2.700198 +2025-07-04 11:27:49,303 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:49,315 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:49,316 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:49,316 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:49,316 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:49,316 - INFO - After Normalization*************************************** +2025-07-04 11:27:49,316 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
11:27:49,316 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:49,619 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:49,619 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:49,620 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:49,620 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:49,620 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:49,621 - INFO - After Normalization*************************************** +2025-07-04 11:27:49,621 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:49,621 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:49,909 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:49,909 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:49,909 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:49,909 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:49,909 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:49,910 - INFO - After Normalization*************************************** +2025-07-04 11:27:49,910 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:49,910 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:53,105 - INFO - Epoch 23/150 - Train Loss: 0.356884, Val Loss: 1.268673 +2025-07-04 11:27:55,363 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:55,376 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:55,377 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:55,377 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 11:27:55,377 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:55,377 - INFO - After Normalization*************************************** +2025-07-04 11:27:55,377 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:55,377 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:55,699 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:55,700 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:55,700 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:55,700 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:55,700 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:55,700 - INFO - After Normalization*************************************** +2025-07-04 11:27:55,700 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:55,700 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:55,988 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:27:55,988 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:27:55,989 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:27:55,989 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:55,989 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:55,989 - INFO - After Normalization*************************************** +2025-07-04 11:27:55,989 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:55,989 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:27:59,210 - INFO - Epoch 24/150 - Train Loss: 0.356916, Val Loss: 0.439105 +2025-07-04 
11:27:59,225 - INFO - New best model saved with Val Loss: 0.439105 +2025-07-04 11:28:01,462 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:01,476 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:01,477 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:01,477 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:01,477 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:01,477 - INFO - After Normalization*************************************** +2025-07-04 11:28:01,477 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:01,478 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:01,744 - INFO - Epoch 103/150 - Train Loss: 0.094943, Val Loss: 0.120177 +2025-07-04 11:28:01,791 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:01,791 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:01,791 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:01,791 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:01,791 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:01,791 - INFO - After Normalization*************************************** +2025-07-04 11:28:01,791 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:01,791 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:02,081 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:02,081 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:02,081 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:02,081 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) 
+2025-07-04 11:28:02,081 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:02,081 - INFO - After Normalization*************************************** +2025-07-04 11:28:02,081 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:02,081 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:05,287 - INFO - Epoch 25/150 - Train Loss: 0.348590, Val Loss: 0.820585 +2025-07-04 11:28:07,530 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:07,543 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:07,543 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:07,544 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:07,544 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:07,544 - INFO - After Normalization*************************************** +2025-07-04 11:28:07,544 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:07,544 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:07,853 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:07,853 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:07,854 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:07,854 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:07,854 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:07,854 - INFO - After Normalization*************************************** +2025-07-04 11:28:07,854 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:07,854 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:08,142 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:28:08,142 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:08,142 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:08,143 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:08,143 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:08,143 - INFO - After Normalization*************************************** +2025-07-04 11:28:08,143 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:08,143 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:11,343 - INFO - Epoch 26/150 - Train Loss: 0.344870, Val Loss: 0.852622 +2025-07-04 11:28:13,581 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:13,595 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:13,595 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:13,595 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:13,595 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:13,596 - INFO - After Normalization*************************************** +2025-07-04 11:28:13,596 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:13,596 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:13,904 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:13,904 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:13,904 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:13,904 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:13,904 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
11:28:13,904 - INFO - After Normalization*************************************** +2025-07-04 11:28:13,904 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:13,904 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:14,192 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:14,192 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:14,193 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:14,193 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:14,193 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:14,193 - INFO - After Normalization*************************************** +2025-07-04 11:28:14,193 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:14,193 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:17,391 - INFO - Epoch 27/150 - Train Loss: 0.341391, Val Loss: 0.770657 +2025-07-04 11:28:19,522 - INFO - Epoch 104/150 - Train Loss: 0.095132, Val Loss: 0.088605 +2025-07-04 11:28:19,650 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:19,664 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:19,664 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:19,664 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:19,664 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:19,664 - INFO - After Normalization*************************************** +2025-07-04 11:28:19,664 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:19,664 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:19,969 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:28:19,969 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:19,969 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:19,969 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:19,969 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:19,969 - INFO - After Normalization*************************************** +2025-07-04 11:28:19,969 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:19,969 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:20,258 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:20,258 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:20,259 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:20,259 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:20,260 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:20,260 - INFO - After Normalization*************************************** +2025-07-04 11:28:20,260 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:20,260 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:23,466 - INFO - Epoch 28/150 - Train Loss: 0.332293, Val Loss: 0.407206 +2025-07-04 11:28:23,481 - INFO - New best model saved with Val Loss: 0.407206 +2025-07-04 11:28:25,744 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:25,757 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:25,757 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:25,757 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:25,757 - INFO 
- (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:25,757 - INFO - After Normalization*************************************** +2025-07-04 11:28:25,757 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:25,758 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:26,072 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:26,072 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:26,072 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:26,072 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:26,072 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:26,073 - INFO - After Normalization*************************************** +2025-07-04 11:28:26,073 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:26,073 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:26,361 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:26,361 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:26,361 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:26,361 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:26,361 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:26,361 - INFO - After Normalization*************************************** +2025-07-04 11:28:26,361 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:26,362 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:29,574 - INFO - Epoch 29/150 - Train Loss: 0.325987, Val Loss: 2.452645 +2025-07-04 11:28:31,836 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:28:31,850 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:31,850 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:31,850 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:31,850 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:31,850 - INFO - After Normalization*************************************** +2025-07-04 11:28:31,850 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:31,851 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:32,158 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:32,158 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:32,158 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:32,159 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:32,159 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:32,159 - INFO - After Normalization*************************************** +2025-07-04 11:28:32,159 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:32,159 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:32,448 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:32,448 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:32,449 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:32,449 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:32,449 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:32,449 - INFO - After Normalization*************************************** +2025-07-04 
11:28:32,449 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:32,449 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:35,695 - INFO - Epoch 30/150 - Train Loss: 0.319199, Val Loss: 7.066501 +2025-07-04 11:28:37,289 - INFO - Epoch 105/150 - Train Loss: 0.095240, Val Loss: 0.088804 +2025-07-04 11:28:38,069 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:38,082 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:38,083 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:38,083 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:38,083 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:38,083 - INFO - After Normalization*************************************** +2025-07-04 11:28:38,083 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:38,083 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:38,390 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:38,391 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:38,391 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:38,391 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:38,391 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:38,391 - INFO - After Normalization*************************************** +2025-07-04 11:28:38,391 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:38,391 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:38,680 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:38,680 - INFO - (device(type='cpu'), 
device(type='cpu')) +2025-07-04 11:28:38,681 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:38,681 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:38,681 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:38,681 - INFO - After Normalization*************************************** +2025-07-04 11:28:38,681 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:38,681 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:41,915 - INFO - Epoch 31/150 - Train Loss: 0.321032, Val Loss: 1.561920 +2025-07-04 11:28:44,186 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:44,200 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:44,200 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:44,200 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:44,200 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:44,200 - INFO - After Normalization*************************************** +2025-07-04 11:28:44,200 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:44,201 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:44,512 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:44,512 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:44,512 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:44,512 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:44,512 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:44,512 - INFO - After Normalization*************************************** +2025-07-04 11:28:44,513 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:44,513 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:44,801 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:44,801 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:44,801 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:44,801 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:44,801 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:44,801 - INFO - After Normalization*************************************** +2025-07-04 11:28:44,801 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:44,801 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:48,009 - INFO - Epoch 32/150 - Train Loss: 0.321487, Val Loss: 0.346940 +2025-07-04 11:28:48,025 - INFO - New best model saved with Val Loss: 0.346940 +2025-07-04 11:28:50,294 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:50,307 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:50,308 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:50,308 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:50,308 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:50,308 - INFO - After Normalization*************************************** +2025-07-04 11:28:50,308 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:50,308 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:50,618 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:50,618 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:50,618 
- INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:50,618 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:50,618 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:50,618 - INFO - After Normalization*************************************** +2025-07-04 11:28:50,618 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:50,618 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:50,911 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:50,911 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:50,912 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:50,912 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:50,912 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:50,912 - INFO - After Normalization*************************************** +2025-07-04 11:28:50,912 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:50,912 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:54,125 - INFO - Epoch 33/150 - Train Loss: 0.315683, Val Loss: 0.400630 +2025-07-04 11:28:55,027 - INFO - Epoch 106/150 - Train Loss: 0.095847, Val Loss: 0.092196 +2025-07-04 11:28:56,396 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:56,411 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:56,411 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:56,411 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:56,411 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:56,411 - INFO - After 
Normalization*************************************** +2025-07-04 11:28:56,412 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:56,412 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:56,714 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:56,714 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:56,714 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:56,714 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:56,715 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:56,715 - INFO - After Normalization*************************************** +2025-07-04 11:28:56,715 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:56,715 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:57,008 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:28:57,008 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:28:57,008 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:28:57,008 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:57,008 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:57,008 - INFO - After Normalization*************************************** +2025-07-04 11:28:57,009 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:28:57,009 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:00,220 - INFO - Epoch 34/150 - Train Loss: 0.309488, Val Loss: 0.348718 +2025-07-04 11:29:02,480 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:02,509 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 
11:29:02,510 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:02,510 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:02,510 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:02,510 - INFO - After Normalization*************************************** +2025-07-04 11:29:02,510 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:02,510 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:02,828 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:02,828 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:02,828 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:02,828 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:02,828 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:02,828 - INFO - After Normalization*************************************** +2025-07-04 11:29:02,828 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:02,828 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:03,121 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:03,121 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:03,121 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:03,121 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:03,121 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:03,122 - INFO - After Normalization*************************************** +2025-07-04 11:29:03,122 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:03,122 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 11:29:06,308 - INFO - Epoch 35/150 - Train Loss: 0.303473, Val Loss: 0.380616 +2025-07-04 11:29:08,549 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:08,563 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:08,563 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:08,563 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:08,563 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:08,564 - INFO - After Normalization*************************************** +2025-07-04 11:29:08,564 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:08,564 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:08,878 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:08,878 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:08,879 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:08,879 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:08,879 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:08,879 - INFO - After Normalization*************************************** +2025-07-04 11:29:08,879 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:08,879 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:09,172 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:09,172 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:09,173 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:09,173 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:09,173 - 
INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:09,173 - INFO - After Normalization*************************************** +2025-07-04 11:29:09,173 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:09,173 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:12,356 - INFO - Epoch 36/150 - Train Loss: 0.301787, Val Loss: 0.787710 +2025-07-04 11:29:12,759 - INFO - Epoch 107/150 - Train Loss: 0.095251, Val Loss: 0.097188 +2025-07-04 11:29:14,615 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:14,629 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:14,630 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:14,630 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:14,630 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:14,630 - INFO - After Normalization*************************************** +2025-07-04 11:29:14,630 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:14,630 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:14,939 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:14,939 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:14,939 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:14,939 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:14,939 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:14,940 - INFO - After Normalization*************************************** +2025-07-04 11:29:14,940 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:14,942 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:29:15,236 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:15,236 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:15,236 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:15,236 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:15,236 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:15,236 - INFO - After Normalization*************************************** +2025-07-04 11:29:15,236 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:15,236 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:18,421 - INFO - Epoch 37/150 - Train Loss: 0.298016, Val Loss: 2.206627 +2025-07-04 11:29:20,688 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:20,702 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:20,702 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:20,702 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:20,702 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:20,702 - INFO - After Normalization*************************************** +2025-07-04 11:29:20,702 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:20,702 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:21,012 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:21,012 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:21,012 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:21,013 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:21,013 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 11:29:21,013 - INFO - After Normalization*************************************** +2025-07-04 11:29:21,013 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:21,013 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:21,301 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:21,301 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:21,301 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:21,301 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:21,301 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:21,302 - INFO - After Normalization*************************************** +2025-07-04 11:29:21,302 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:21,302 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:24,498 - INFO - Epoch 38/150 - Train Loss: 0.286918, Val Loss: 2.745399 +2025-07-04 11:29:26,751 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:26,766 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:26,766 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:26,766 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:26,767 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:26,767 - INFO - After Normalization*************************************** +2025-07-04 11:29:26,767 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:26,767 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:27,073 - INFO - before .to(local_rank)*************************************** +2025-07-04 
11:29:27,073 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:27,074 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:27,074 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:27,074 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:27,074 - INFO - After Normalization*************************************** +2025-07-04 11:29:27,074 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:27,074 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:27,363 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:27,363 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:27,363 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:27,364 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:27,364 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:27,364 - INFO - After Normalization*************************************** +2025-07-04 11:29:27,364 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:27,364 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:30,521 - INFO - Epoch 108/150 - Train Loss: 0.096115, Val Loss: 0.095816 +2025-07-04 11:29:30,587 - INFO - Epoch 39/150 - Train Loss: 0.283908, Val Loss: 1.141375 +2025-07-04 11:29:32,817 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:32,831 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:32,832 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:32,832 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:32,832 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:29:32,832 - INFO - After Normalization*************************************** +2025-07-04 11:29:32,832 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:32,832 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:33,145 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:33,145 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:33,145 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:33,145 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:33,145 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:33,145 - INFO - After Normalization*************************************** +2025-07-04 11:29:33,145 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:33,145 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:33,433 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:33,434 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:33,434 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:33,434 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:33,434 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:33,434 - INFO - After Normalization*************************************** +2025-07-04 11:29:33,434 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:33,434 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:36,629 - INFO - Epoch 40/150 - Train Loss: 0.286190, Val Loss: 0.318154 +2025-07-04 11:29:36,643 - INFO - New best model saved with Val Loss: 0.318154 +2025-07-04 11:29:39,001 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:29:39,014 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:39,015 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:39,015 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:39,015 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:39,015 - INFO - After Normalization*************************************** +2025-07-04 11:29:39,015 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:39,015 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:39,321 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:39,321 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:39,321 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:39,321 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:39,321 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:39,322 - INFO - After Normalization*************************************** +2025-07-04 11:29:39,322 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:39,322 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:39,610 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:39,610 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:39,610 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:39,610 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:39,610 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:39,610 - INFO - After Normalization*************************************** +2025-07-04 
11:29:39,611 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:39,611 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:42,804 - INFO - Epoch 41/150 - Train Loss: 0.286844, Val Loss: 0.326355 +2025-07-04 11:29:45,033 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:45,047 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:45,047 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:45,047 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:45,047 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:45,047 - INFO - After Normalization*************************************** +2025-07-04 11:29:45,047 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:45,047 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:45,353 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:45,354 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:45,354 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:45,354 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:45,354 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:45,354 - INFO - After Normalization*************************************** +2025-07-04 11:29:45,354 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:45,354 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:45,643 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:45,643 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:45,644 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 11:29:45,644 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:45,644 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:45,644 - INFO - After Normalization*************************************** +2025-07-04 11:29:45,644 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:45,644 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:48,290 - INFO - Epoch 109/150 - Train Loss: 0.095379, Val Loss: 0.088785 +2025-07-04 11:29:48,869 - INFO - Epoch 42/150 - Train Loss: 0.284038, Val Loss: 0.437666 +2025-07-04 11:29:51,115 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:51,128 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:51,129 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:51,129 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:51,129 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:51,129 - INFO - After Normalization*************************************** +2025-07-04 11:29:51,129 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:51,129 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:51,451 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:51,451 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:51,451 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:51,452 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:51,452 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:51,452 - INFO - After Normalization*************************************** 
+2025-07-04 11:29:51,452 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:51,452 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:51,740 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:51,740 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:51,740 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:51,740 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:51,741 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:51,741 - INFO - After Normalization*************************************** +2025-07-04 11:29:51,741 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:51,741 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:54,922 - INFO - Epoch 43/150 - Train Loss: 0.287417, Val Loss: 0.862749 +2025-07-04 11:29:57,190 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:57,203 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:57,204 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:57,204 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:57,204 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:57,204 - INFO - After Normalization*************************************** +2025-07-04 11:29:57,204 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:57,204 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:57,520 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:57,520 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:57,520 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 11:29:57,520 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:57,520 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:57,520 - INFO - After Normalization*************************************** +2025-07-04 11:29:57,520 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:57,520 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:57,809 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:29:57,809 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:29:57,809 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:29:57,809 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:57,810 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:57,810 - INFO - After Normalization*************************************** +2025-07-04 11:29:57,810 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:29:57,810 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:00,966 - INFO - Epoch 44/150 - Train Loss: 0.282725, Val Loss: 0.694332 +2025-07-04 11:30:03,229 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:03,242 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:03,243 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:03,243 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:03,243 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:03,243 - INFO - After Normalization*************************************** +2025-07-04 11:30:03,243 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:30:03,243 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:03,557 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:03,557 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:03,558 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:03,558 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:03,558 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:03,558 - INFO - After Normalization*************************************** +2025-07-04 11:30:03,558 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:03,558 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:03,847 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:03,847 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:03,847 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:03,847 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:03,847 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:03,847 - INFO - After Normalization*************************************** +2025-07-04 11:30:03,847 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:03,847 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:06,033 - INFO - Epoch 110/150 - Train Loss: 0.094124, Val Loss: 0.086502 +2025-07-04 11:30:06,049 - INFO - New best model saved with Val Loss: 0.086502 +2025-07-04 11:30:07,056 - INFO - Epoch 45/150 - Train Loss: 0.293271, Val Loss: 0.363853 +2025-07-04 11:30:09,299 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:09,312 - INFO - (device(type='cpu'), 
device(type='cpu')) +2025-07-04 11:30:09,312 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:09,312 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:09,312 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:09,312 - INFO - After Normalization*************************************** +2025-07-04 11:30:09,312 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:09,312 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:09,620 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:09,620 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:09,620 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:09,620 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:09,620 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:09,620 - INFO - After Normalization*************************************** +2025-07-04 11:30:09,620 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:09,620 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:09,909 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:09,909 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:09,909 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:09,909 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:09,909 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:09,909 - INFO - After Normalization*************************************** +2025-07-04 11:30:09,909 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:09,909 - 
INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:13,102 - INFO - Epoch 46/150 - Train Loss: 0.283596, Val Loss: 0.348534 +2025-07-04 11:30:15,342 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:15,355 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:15,356 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:15,356 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:15,356 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:15,356 - INFO - After Normalization*************************************** +2025-07-04 11:30:15,356 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:15,356 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:15,663 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:15,663 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:15,663 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:15,663 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:15,663 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:15,663 - INFO - After Normalization*************************************** +2025-07-04 11:30:15,663 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:15,663 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:15,952 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:15,952 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:15,952 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:15,952 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) 
+2025-07-04 11:30:15,952 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:15,952 - INFO - After Normalization*************************************** +2025-07-04 11:30:15,952 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:15,953 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:19,146 - INFO - Epoch 47/150 - Train Loss: 0.268562, Val Loss: 0.536266 +2025-07-04 11:30:21,394 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:21,406 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:21,407 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:21,407 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:21,407 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:21,407 - INFO - After Normalization*************************************** +2025-07-04 11:30:21,407 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:21,407 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:21,711 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:21,711 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:21,711 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:21,711 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:21,711 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:21,712 - INFO - After Normalization*************************************** +2025-07-04 11:30:21,712 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:21,712 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:22,000 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:30:22,000 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:22,001 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:22,001 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:22,001 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:22,001 - INFO - After Normalization*************************************** +2025-07-04 11:30:22,001 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:22,001 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:23,870 - INFO - Epoch 111/150 - Train Loss: 0.093858, Val Loss: 0.087059 +2025-07-04 11:30:25,252 - INFO - Epoch 48/150 - Train Loss: 0.268272, Val Loss: 1.022365 +2025-07-04 11:30:27,510 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:27,523 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:27,523 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:27,524 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:27,524 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:27,524 - INFO - After Normalization*************************************** +2025-07-04 11:30:27,524 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:27,524 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:27,840 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:27,840 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:27,840 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:27,841 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
11:30:27,841 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:27,841 - INFO - After Normalization*************************************** +2025-07-04 11:30:27,841 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:27,841 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:28,129 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:28,129 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:28,129 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:28,129 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:28,129 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:28,130 - INFO - After Normalization*************************************** +2025-07-04 11:30:28,130 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:28,130 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:31,329 - INFO - Epoch 49/150 - Train Loss: 0.265477, Val Loss: 1.328436 +2025-07-04 11:30:33,581 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:33,595 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:33,596 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:33,596 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:33,596 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:33,596 - INFO - After Normalization*************************************** +2025-07-04 11:30:33,596 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:33,596 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:33,906 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:30:33,906 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:33,906 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:33,906 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:33,907 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:33,907 - INFO - After Normalization*************************************** +2025-07-04 11:30:33,907 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:33,907 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:34,195 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:34,195 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:34,195 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:34,195 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:34,196 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:34,196 - INFO - After Normalization*************************************** +2025-07-04 11:30:34,196 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:34,196 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:37,383 - INFO - Epoch 50/150 - Train Loss: 0.256372, Val Loss: 0.734287 +2025-07-04 11:30:39,772 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:39,785 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:39,786 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:39,786 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:39,786 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
11:30:39,786 - INFO - After Normalization*************************************** +2025-07-04 11:30:39,786 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:39,786 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:40,102 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:40,102 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:40,102 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:40,102 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:40,102 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:40,102 - INFO - After Normalization*************************************** +2025-07-04 11:30:40,102 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:40,102 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:40,391 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:40,391 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:40,391 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:40,391 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:40,391 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:40,392 - INFO - After Normalization*************************************** +2025-07-04 11:30:40,392 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:40,392 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:41,594 - INFO - Epoch 112/150 - Train Loss: 0.094762, Val Loss: 0.086803 +2025-07-04 11:30:43,611 - INFO - Epoch 51/150 - Train Loss: 0.254741, Val Loss: 1.214569 +2025-07-04 11:30:45,854 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:30:45,870 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:45,870 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:45,870 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:45,870 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:45,871 - INFO - After Normalization*************************************** +2025-07-04 11:30:45,871 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:45,871 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:46,178 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:46,178 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:46,179 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:46,179 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:46,179 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:46,179 - INFO - After Normalization*************************************** +2025-07-04 11:30:46,179 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:46,179 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:46,467 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:46,467 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:46,468 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:46,468 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:46,468 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:46,468 - INFO - After Normalization*************************************** +2025-07-04 
11:30:46,468 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:46,468 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:49,663 - INFO - Epoch 52/150 - Train Loss: 0.251523, Val Loss: 0.768222 +2025-07-04 11:30:51,904 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:51,919 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:51,919 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:51,919 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:51,919 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:51,919 - INFO - After Normalization*************************************** +2025-07-04 11:30:51,919 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:51,919 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:52,226 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:52,226 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:52,227 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:52,227 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:52,227 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:52,227 - INFO - After Normalization*************************************** +2025-07-04 11:30:52,227 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:52,227 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:52,515 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:52,515 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:52,515 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 11:30:52,516 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:52,516 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:52,516 - INFO - After Normalization*************************************** +2025-07-04 11:30:52,516 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:52,516 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:55,757 - INFO - Epoch 53/150 - Train Loss: 0.239440, Val Loss: 0.476771 +2025-07-04 11:30:58,004 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:58,018 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:58,019 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:58,019 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:58,019 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:58,019 - INFO - After Normalization*************************************** +2025-07-04 11:30:58,019 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:58,019 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:58,319 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:58,319 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:58,320 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:58,320 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:58,320 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:58,320 - INFO - After Normalization*************************************** +2025-07-04 11:30:58,320 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:30:58,320 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:58,613 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:30:58,613 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:30:58,613 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:30:58,614 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:58,614 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:58,614 - INFO - After Normalization*************************************** +2025-07-04 11:30:58,614 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:58,614 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:30:59,352 - INFO - Epoch 113/150 - Train Loss: 0.094220, Val Loss: 0.086305 +2025-07-04 11:30:59,367 - INFO - New best model saved with Val Loss: 0.086305 +2025-07-04 11:31:01,837 - INFO - Epoch 54/150 - Train Loss: 0.240818, Val Loss: 0.354884 +2025-07-04 11:31:04,099 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:04,113 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:04,113 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:04,113 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:04,113 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:04,113 - INFO - After Normalization*************************************** +2025-07-04 11:31:04,114 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:04,114 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:04,427 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:04,427 - INFO - (device(type='cpu'), 
device(type='cpu')) +2025-07-04 11:31:04,427 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:04,427 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:04,427 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:04,427 - INFO - After Normalization*************************************** +2025-07-04 11:31:04,427 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:04,427 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:04,720 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:04,720 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:04,721 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:04,721 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:04,721 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:04,721 - INFO - After Normalization*************************************** +2025-07-04 11:31:04,721 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:04,721 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:07,910 - INFO - Epoch 55/150 - Train Loss: 0.240019, Val Loss: 0.299964 +2025-07-04 11:31:07,926 - INFO - New best model saved with Val Loss: 0.299964 +2025-07-04 11:31:10,191 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:10,206 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:10,206 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:10,206 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:10,206 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:10,206 - INFO - After 
Normalization*************************************** +2025-07-04 11:31:10,206 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:10,206 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:10,512 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:10,512 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:10,513 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:10,513 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:10,513 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:10,513 - INFO - After Normalization*************************************** +2025-07-04 11:31:10,513 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:10,513 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:10,807 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:10,807 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:10,807 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:10,807 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:10,807 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:10,807 - INFO - After Normalization*************************************** +2025-07-04 11:31:10,807 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:10,807 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:14,014 - INFO - Epoch 56/150 - Train Loss: 0.238060, Val Loss: 0.274806 +2025-07-04 11:31:14,030 - INFO - New best model saved with Val Loss: 0.274806 +2025-07-04 11:31:16,268 - INFO - before .to(local_rank)*************************************** +2025-07-04 
11:31:16,282 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:16,283 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:16,283 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:16,283 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:16,283 - INFO - After Normalization*************************************** +2025-07-04 11:31:16,283 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:16,283 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:16,585 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:16,585 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:16,585 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:16,585 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:16,585 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:16,585 - INFO - After Normalization*************************************** +2025-07-04 11:31:16,585 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:16,585 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:16,879 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:16,879 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:16,879 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:16,879 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:16,879 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:16,879 - INFO - After Normalization*************************************** +2025-07-04 11:31:16,879 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 11:31:16,879 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:17,098 - INFO - Epoch 114/150 - Train Loss: 0.093979, Val Loss: 0.086554 +2025-07-04 11:31:20,072 - INFO - Epoch 57/150 - Train Loss: 0.236272, Val Loss: 0.257384 +2025-07-04 11:31:20,087 - INFO - New best model saved with Val Loss: 0.257384 +2025-07-04 11:31:22,348 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:22,361 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:22,362 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:22,362 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:22,362 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:22,362 - INFO - After Normalization*************************************** +2025-07-04 11:31:22,362 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:22,362 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:22,680 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:22,681 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:22,681 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:22,681 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:22,681 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:22,681 - INFO - After Normalization*************************************** +2025-07-04 11:31:22,681 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:22,681 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:22,974 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:22,974 - INFO - 
(device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:22,974 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:22,974 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:22,974 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:22,974 - INFO - After Normalization*************************************** +2025-07-04 11:31:22,974 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:22,974 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:26,155 - INFO - Epoch 58/150 - Train Loss: 0.239714, Val Loss: 0.249452 +2025-07-04 11:31:26,169 - INFO - New best model saved with Val Loss: 0.249452 +2025-07-04 11:31:28,419 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:28,433 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:28,434 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:28,434 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:28,434 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:28,434 - INFO - After Normalization*************************************** +2025-07-04 11:31:28,434 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:28,434 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:28,747 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:28,747 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:28,747 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:28,748 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:28,748 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:28,748 - 
INFO - After Normalization*************************************** +2025-07-04 11:31:28,748 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:28,748 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:29,041 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:29,041 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:29,041 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:29,041 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:29,041 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:29,041 - INFO - After Normalization*************************************** +2025-07-04 11:31:29,041 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:29,041 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:32,280 - INFO - Epoch 59/150 - Train Loss: 0.234231, Val Loss: 0.246667 +2025-07-04 11:31:32,295 - INFO - New best model saved with Val Loss: 0.246667 +2025-07-04 11:31:34,548 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:34,562 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:34,562 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:34,562 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:34,562 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:34,562 - INFO - After Normalization*************************************** +2025-07-04 11:31:34,562 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:34,562 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:34,810 - INFO - Epoch 115/150 - Train Loss: 0.093502, Val Loss: 0.086534 
+2025-07-04 11:31:34,869 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:34,869 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:34,870 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:34,870 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:34,870 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:34,870 - INFO - After Normalization*************************************** +2025-07-04 11:31:34,870 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:34,870 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:35,159 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:35,159 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:35,160 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:35,160 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:35,160 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:35,160 - INFO - After Normalization*************************************** +2025-07-04 11:31:35,160 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:35,160 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:38,340 - INFO - Epoch 60/150 - Train Loss: 0.234758, Val Loss: 0.247478 +2025-07-04 11:31:40,717 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:40,731 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:40,731 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:40,731 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:40,731 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 11:31:40,731 - INFO - After Normalization*************************************** +2025-07-04 11:31:40,731 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:40,731 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:41,042 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:41,042 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:41,042 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:41,042 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:41,042 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:41,042 - INFO - After Normalization*************************************** +2025-07-04 11:31:41,042 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:41,043 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:41,331 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:41,331 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:41,331 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:41,331 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:41,331 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:41,331 - INFO - After Normalization*************************************** +2025-07-04 11:31:41,331 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:41,331 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:44,539 - INFO - Epoch 61/150 - Train Loss: 0.233135, Val Loss: 0.252457 +2025-07-04 11:31:46,795 - INFO - before .to(local_rank)*************************************** +2025-07-04 
11:31:46,809 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:46,809 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:46,809 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:46,809 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:46,809 - INFO - After Normalization*************************************** +2025-07-04 11:31:46,809 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:46,809 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:47,116 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:47,116 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:47,117 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:47,117 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:47,117 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:47,117 - INFO - After Normalization*************************************** +2025-07-04 11:31:47,117 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:47,117 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:47,405 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:47,405 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:47,406 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:47,406 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:47,406 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:47,406 - INFO - After Normalization*************************************** +2025-07-04 11:31:47,406 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 11:31:47,406 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:50,593 - INFO - Epoch 62/150 - Train Loss: 0.232894, Val Loss: 0.257394 +2025-07-04 11:31:52,554 - INFO - Epoch 116/150 - Train Loss: 0.093854, Val Loss: 0.086689 +2025-07-04 11:31:52,833 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:52,847 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:52,847 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:52,847 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:52,847 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:52,848 - INFO - After Normalization*************************************** +2025-07-04 11:31:52,848 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:52,848 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:53,153 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:53,153 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:53,154 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:53,154 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:53,154 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:53,154 - INFO - After Normalization*************************************** +2025-07-04 11:31:53,154 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:53,154 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:53,443 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:53,443 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:53,443 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 11:31:53,444 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:53,444 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:53,444 - INFO - After Normalization*************************************** +2025-07-04 11:31:53,444 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:53,444 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:56,647 - INFO - Epoch 63/150 - Train Loss: 0.233125, Val Loss: 0.255208 +2025-07-04 11:31:58,899 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:58,912 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:58,912 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:58,912 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:58,912 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:58,912 - INFO - After Normalization*************************************** +2025-07-04 11:31:58,912 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:58,912 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:59,221 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:59,221 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:59,222 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:59,222 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:59,222 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:59,222 - INFO - After Normalization*************************************** +2025-07-04 11:31:59,222 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:31:59,222 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:59,510 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:31:59,510 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:31:59,511 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:31:59,511 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:59,511 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:59,511 - INFO - After Normalization*************************************** +2025-07-04 11:31:59,511 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:31:59,511 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:02,706 - INFO - Epoch 64/150 - Train Loss: 0.234421, Val Loss: 0.255270 +2025-07-04 11:32:04,950 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:04,963 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:04,964 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:04,964 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:04,964 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:04,964 - INFO - After Normalization*************************************** +2025-07-04 11:32:04,964 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:04,964 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:05,282 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:05,282 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:05,282 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:05,282 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 11:32:05,282 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:05,283 - INFO - After Normalization*************************************** +2025-07-04 11:32:05,283 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:05,283 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:05,571 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:05,571 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:05,571 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:05,571 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:05,571 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:05,572 - INFO - After Normalization*************************************** +2025-07-04 11:32:05,572 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:05,572 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:08,788 - INFO - Epoch 65/150 - Train Loss: 0.234516, Val Loss: 0.252954 +2025-07-04 11:32:10,308 - INFO - Epoch 117/150 - Train Loss: 0.093443, Val Loss: 0.086520 +2025-07-04 11:32:11,032 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:11,046 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:11,046 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:11,046 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:11,046 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:11,046 - INFO - After Normalization*************************************** +2025-07-04 11:32:11,047 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
11:32:11,047 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:11,357 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:11,357 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:11,358 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:11,358 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:11,358 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:11,358 - INFO - After Normalization*************************************** +2025-07-04 11:32:11,358 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:11,358 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:11,647 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:11,647 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:11,648 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:11,648 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:11,648 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:11,648 - INFO - After Normalization*************************************** +2025-07-04 11:32:11,648 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:11,648 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:14,826 - INFO - Epoch 66/150 - Train Loss: 0.231194, Val Loss: 0.250868 +2025-07-04 11:32:17,087 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:17,101 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:17,101 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:17,101 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 11:32:17,101 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:17,101 - INFO - After Normalization*************************************** +2025-07-04 11:32:17,101 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:17,102 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:17,409 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:17,409 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:17,410 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:17,410 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:17,410 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:17,410 - INFO - After Normalization*************************************** +2025-07-04 11:32:17,410 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:17,410 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:17,698 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:17,698 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:17,699 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:17,699 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:17,699 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:17,699 - INFO - After Normalization*************************************** +2025-07-04 11:32:17,699 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:17,699 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:20,899 - INFO - Epoch 67/150 - Train Loss: 0.230656, Val Loss: 0.250073 +2025-07-04 
11:32:23,161 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:23,174 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:23,175 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:23,175 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:23,175 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:23,175 - INFO - After Normalization*************************************** +2025-07-04 11:32:23,175 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:23,175 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:23,479 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:23,479 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:23,479 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:23,479 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:23,479 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:23,479 - INFO - After Normalization*************************************** +2025-07-04 11:32:23,479 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:23,479 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:23,768 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:23,768 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:23,768 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:23,768 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:23,768 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:23,768 - INFO - After 
Normalization*************************************** +2025-07-04 11:32:23,768 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:23,768 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:26,967 - INFO - Epoch 68/150 - Train Loss: 0.229500, Val Loss: 0.250125 +2025-07-04 11:32:28,042 - INFO - Epoch 118/150 - Train Loss: 0.093456, Val Loss: 0.086879 +2025-07-04 11:32:29,246 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:29,259 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:29,260 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:29,260 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:29,260 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:29,260 - INFO - After Normalization*************************************** +2025-07-04 11:32:29,260 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:29,260 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:29,565 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:29,565 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:29,566 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:29,566 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:29,566 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:29,566 - INFO - After Normalization*************************************** +2025-07-04 11:32:29,566 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:29,566 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:29,854 - INFO - before .to(local_rank)*************************************** 
+2025-07-04 11:32:29,854 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:29,855 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:29,855 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:29,855 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:29,855 - INFO - After Normalization*************************************** +2025-07-04 11:32:29,855 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:29,855 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:33,066 - INFO - Epoch 69/150 - Train Loss: 0.230819, Val Loss: 0.247301 +2025-07-04 11:32:35,298 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:35,311 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:35,312 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:35,312 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:35,312 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:35,312 - INFO - After Normalization*************************************** +2025-07-04 11:32:35,312 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:35,312 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:35,630 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:35,630 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:35,630 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:35,630 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:35,630 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:35,630 - INFO - After 
Normalization*************************************** +2025-07-04 11:32:35,630 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:35,630 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:35,919 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:35,919 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:35,919 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:35,919 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:35,919 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:35,919 - INFO - After Normalization*************************************** +2025-07-04 11:32:35,919 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:35,919 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:39,114 - INFO - Epoch 70/150 - Train Loss: 0.228859, Val Loss: 0.245401 +2025-07-04 11:32:39,127 - INFO - New best model saved with Val Loss: 0.245401 +2025-07-04 11:32:41,486 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:41,499 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:41,500 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:41,500 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:41,500 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:41,500 - INFO - After Normalization*************************************** +2025-07-04 11:32:41,500 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:41,500 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:41,801 - INFO - before .to(local_rank)*************************************** +2025-07-04 
11:32:41,801 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:41,801 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:41,801 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:41,801 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:41,802 - INFO - After Normalization*************************************** +2025-07-04 11:32:41,802 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:41,802 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:42,091 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:42,091 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:42,091 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:42,091 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:42,091 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:42,092 - INFO - After Normalization*************************************** +2025-07-04 11:32:42,092 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:42,092 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:45,302 - INFO - Epoch 71/150 - Train Loss: 0.228805, Val Loss: 0.245921 +2025-07-04 11:32:45,800 - INFO - Epoch 119/150 - Train Loss: 0.093429, Val Loss: 0.086754 +2025-07-04 11:32:47,558 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:47,572 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:47,572 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:47,572 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:47,572 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:32:47,572 - INFO - After Normalization*************************************** +2025-07-04 11:32:47,572 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:47,572 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:47,890 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:47,890 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:47,890 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:47,890 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:47,890 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:47,890 - INFO - After Normalization*************************************** +2025-07-04 11:32:47,890 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:47,890 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:48,182 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:48,183 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:48,183 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:48,183 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:48,183 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:48,183 - INFO - After Normalization*************************************** +2025-07-04 11:32:48,183 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:48,183 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:51,373 - INFO - Epoch 72/150 - Train Loss: 0.229246, Val Loss: 0.244591 +2025-07-04 11:32:51,388 - INFO - New best model saved with Val Loss: 0.244591 +2025-07-04 11:32:53,625 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:32:53,638 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:53,639 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:53,639 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:53,639 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:53,639 - INFO - After Normalization*************************************** +2025-07-04 11:32:53,639 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:53,639 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:53,952 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:53,952 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:53,952 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:53,952 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:53,952 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:53,952 - INFO - After Normalization*************************************** +2025-07-04 11:32:53,952 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:53,952 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:54,244 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:54,244 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:54,244 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:54,244 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:54,244 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:54,244 - INFO - After Normalization*************************************** +2025-07-04 
11:32:54,244 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:54,244 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:57,452 - INFO - Epoch 73/150 - Train Loss: 0.228147, Val Loss: 0.242999 +2025-07-04 11:32:57,466 - INFO - New best model saved with Val Loss: 0.242999 +2025-07-04 11:32:59,726 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:32:59,740 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:32:59,740 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:32:59,740 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:59,740 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:59,740 - INFO - After Normalization*************************************** +2025-07-04 11:32:59,740 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:32:59,740 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:00,045 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:00,046 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:00,046 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:00,046 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:00,046 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:00,046 - INFO - After Normalization*************************************** +2025-07-04 11:33:00,046 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:00,046 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:00,338 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:00,338 - INFO - (device(type='cpu'), device(type='cpu')) 
+2025-07-04 11:33:00,339 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:00,339 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:00,339 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:00,339 - INFO - After Normalization*************************************** +2025-07-04 11:33:00,339 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:00,339 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:03,541 - INFO - Epoch 120/150 - Train Loss: 0.093038, Val Loss: 0.086636 +2025-07-04 11:33:03,573 - INFO - Epoch 74/150 - Train Loss: 0.227171, Val Loss: 0.242266 +2025-07-04 11:33:03,588 - INFO - New best model saved with Val Loss: 0.242266 +2025-07-04 11:33:05,845 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:05,859 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:05,859 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:05,859 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:05,859 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:05,859 - INFO - After Normalization*************************************** +2025-07-04 11:33:05,859 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:05,859 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:06,178 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:06,178 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:06,178 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:06,178 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:06,178 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 11:33:06,179 - INFO - After Normalization*************************************** +2025-07-04 11:33:06,179 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:06,179 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:06,471 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:06,471 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:06,471 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:06,471 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:06,471 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:06,471 - INFO - After Normalization*************************************** +2025-07-04 11:33:06,471 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:06,471 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:09,689 - INFO - Epoch 75/150 - Train Loss: 0.227594, Val Loss: 0.240315 +2025-07-04 11:33:09,703 - INFO - New best model saved with Val Loss: 0.240315 +2025-07-04 11:33:11,955 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:11,968 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:11,969 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:11,969 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:11,969 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:11,969 - INFO - After Normalization*************************************** +2025-07-04 11:33:11,969 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:11,969 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:12,277 - INFO - 
before .to(local_rank)*************************************** +2025-07-04 11:33:12,277 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:12,277 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:12,277 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:12,277 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:12,277 - INFO - After Normalization*************************************** +2025-07-04 11:33:12,277 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:12,277 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:12,569 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:12,569 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:12,569 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:12,569 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:12,569 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:12,569 - INFO - After Normalization*************************************** +2025-07-04 11:33:12,569 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:12,570 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:15,769 - INFO - Epoch 76/150 - Train Loss: 0.226053, Val Loss: 0.238202 +2025-07-04 11:33:15,783 - INFO - New best model saved with Val Loss: 0.238202 +2025-07-04 11:33:18,039 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:18,053 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:18,053 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:18,053 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:18,053 
- INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:18,053 - INFO - After Normalization*************************************** +2025-07-04 11:33:18,053 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:18,053 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:18,359 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:18,359 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:18,359 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:18,359 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:18,359 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:18,359 - INFO - After Normalization*************************************** +2025-07-04 11:33:18,359 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:18,359 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:18,648 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:18,648 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:18,649 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:18,649 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:18,649 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:18,649 - INFO - After Normalization*************************************** +2025-07-04 11:33:18,649 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:18,649 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:21,376 - INFO - Epoch 121/150 - Train Loss: 0.093188, Val Loss: 0.086384 +2025-07-04 11:33:21,848 - INFO - Epoch 77/150 - Train Loss: 0.225167, Val 
Loss: 0.238969 +2025-07-04 11:33:24,091 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:24,104 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:24,105 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:24,105 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:24,105 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:24,105 - INFO - After Normalization*************************************** +2025-07-04 11:33:24,105 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:24,105 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:24,410 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:24,410 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:24,410 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:24,410 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:24,410 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:24,411 - INFO - After Normalization*************************************** +2025-07-04 11:33:24,411 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:24,411 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:24,699 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:24,699 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:24,699 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:24,699 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:24,699 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:24,699 - INFO - After 
Normalization*************************************** +2025-07-04 11:33:24,699 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:24,700 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:27,884 - INFO - Epoch 78/150 - Train Loss: 0.231253, Val Loss: 0.240782 +2025-07-04 11:33:30,132 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:30,146 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:30,146 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:30,147 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:30,147 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:30,147 - INFO - After Normalization*************************************** +2025-07-04 11:33:30,147 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:30,147 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:30,467 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:30,467 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:30,467 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:30,467 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:30,467 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:30,467 - INFO - After Normalization*************************************** +2025-07-04 11:33:30,467 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:30,467 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:30,756 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:30,756 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 
11:33:30,756 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:30,756 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:30,756 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:30,756 - INFO - After Normalization*************************************** +2025-07-04 11:33:30,756 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:30,756 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:33,938 - INFO - Epoch 79/150 - Train Loss: 0.223478, Val Loss: 0.243409 +2025-07-04 11:33:36,198 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:36,213 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:36,213 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:36,213 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:36,213 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:36,213 - INFO - After Normalization*************************************** +2025-07-04 11:33:36,213 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:36,213 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:36,533 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:36,533 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:36,533 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:36,533 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:36,533 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:36,533 - INFO - After Normalization*************************************** +2025-07-04 11:33:36,533 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 11:33:36,533 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:36,822 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:36,822 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:36,823 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:36,823 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:36,823 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:36,823 - INFO - After Normalization*************************************** +2025-07-04 11:33:36,823 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:36,823 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:39,097 - INFO - Epoch 122/150 - Train Loss: 0.093174, Val Loss: 0.086662 +2025-07-04 11:33:40,045 - INFO - Epoch 80/150 - Train Loss: 0.224525, Val Loss: 0.241048 +2025-07-04 11:33:42,411 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:42,425 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:42,426 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:42,426 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:42,426 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:42,426 - INFO - After Normalization*************************************** +2025-07-04 11:33:42,426 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:42,426 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:42,747 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:42,747 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:42,747 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 11:33:42,747 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:42,748 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:42,748 - INFO - After Normalization*************************************** +2025-07-04 11:33:42,748 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:42,748 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:43,036 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:43,036 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:43,036 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:43,036 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:43,036 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:43,036 - INFO - After Normalization*************************************** +2025-07-04 11:33:43,037 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:43,037 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:46,226 - INFO - Epoch 81/150 - Train Loss: 0.224605, Val Loss: 0.241788 +2025-07-04 11:33:48,470 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:48,486 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:48,487 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:48,487 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:48,487 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:48,487 - INFO - After Normalization*************************************** +2025-07-04 11:33:48,487 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:33:48,487 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:48,799 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:48,800 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:48,800 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:48,800 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:48,800 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:48,800 - INFO - After Normalization*************************************** +2025-07-04 11:33:48,800 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:48,800 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:49,088 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:49,088 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:49,089 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:49,089 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:49,089 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:49,089 - INFO - After Normalization*************************************** +2025-07-04 11:33:49,089 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:49,089 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:52,291 - INFO - Epoch 82/150 - Train Loss: 0.224534, Val Loss: 0.241378 +2025-07-04 11:33:54,558 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:54,572 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:54,572 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:54,572 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 11:33:54,572 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:54,572 - INFO - After Normalization*************************************** +2025-07-04 11:33:54,572 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:54,572 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:54,879 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:54,879 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:54,880 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:54,880 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:54,880 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:54,880 - INFO - After Normalization*************************************** +2025-07-04 11:33:54,880 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:54,880 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:55,169 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:33:55,169 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:33:55,169 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:33:55,169 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:55,169 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:55,169 - INFO - After Normalization*************************************** +2025-07-04 11:33:55,169 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:55,170 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:33:56,840 - INFO - Epoch 123/150 - Train Loss: 0.093458, Val Loss: 0.086432 +2025-07-04 
11:33:58,391 - INFO - Epoch 83/150 - Train Loss: 0.225927, Val Loss: 0.239900 +2025-07-04 11:34:00,645 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:00,660 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:00,660 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:00,660 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:00,660 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:00,660 - INFO - After Normalization*************************************** +2025-07-04 11:34:00,660 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:00,660 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:00,962 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:00,962 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:00,962 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:00,962 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:00,962 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:00,962 - INFO - After Normalization*************************************** +2025-07-04 11:34:00,963 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:00,963 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:01,251 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:01,251 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:01,251 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:01,251 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:01,251 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 11:34:01,251 - INFO - After Normalization*************************************** +2025-07-04 11:34:01,251 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:01,251 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:04,444 - INFO - Epoch 84/150 - Train Loss: 0.222113, Val Loss: 0.241609 +2025-07-04 11:34:06,680 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:06,694 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:06,694 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:06,694 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:06,694 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:06,694 - INFO - After Normalization*************************************** +2025-07-04 11:34:06,694 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:06,694 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:06,994 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:06,994 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:06,994 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:06,995 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:06,995 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:06,995 - INFO - After Normalization*************************************** +2025-07-04 11:34:06,995 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:06,995 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:07,283 - INFO - before .to(local_rank)*************************************** +2025-07-04 
11:34:07,283 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:07,284 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:07,284 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:07,284 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:07,284 - INFO - After Normalization*************************************** +2025-07-04 11:34:07,284 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:07,284 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:10,482 - INFO - Epoch 85/150 - Train Loss: 0.222664, Val Loss: 0.241702 +2025-07-04 11:34:12,750 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:12,764 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:12,764 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:12,764 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:12,764 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:12,764 - INFO - After Normalization*************************************** +2025-07-04 11:34:12,764 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:12,764 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:13,082 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:13,082 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:13,082 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:13,082 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:13,083 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:13,083 - INFO - After 
Normalization*************************************** +2025-07-04 11:34:13,083 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:13,083 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:13,371 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:13,371 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:13,372 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:13,372 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:13,372 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:13,372 - INFO - After Normalization*************************************** +2025-07-04 11:34:13,372 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:13,372 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:14,607 - INFO - Epoch 124/150 - Train Loss: 0.093784, Val Loss: 0.086574 +2025-07-04 11:34:16,587 - INFO - Epoch 86/150 - Train Loss: 0.222005, Val Loss: 0.240705 +2025-07-04 11:34:18,806 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:18,820 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:18,820 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:18,821 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:18,821 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:18,821 - INFO - After Normalization*************************************** +2025-07-04 11:34:18,821 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:18,821 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:19,133 - INFO - before .to(local_rank)*************************************** 
+2025-07-04 11:34:19,133 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:19,134 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:19,134 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:19,134 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:19,134 - INFO - After Normalization*************************************** +2025-07-04 11:34:19,134 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:19,134 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:19,422 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:19,422 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:19,423 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:19,423 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:19,423 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:19,423 - INFO - After Normalization*************************************** +2025-07-04 11:34:19,423 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:19,423 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:22,632 - INFO - Epoch 87/150 - Train Loss: 0.221814, Val Loss: 0.236728 +2025-07-04 11:34:22,648 - INFO - New best model saved with Val Loss: 0.236728 +2025-07-04 11:34:24,913 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:24,927 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:24,928 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:24,928 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:24,928 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:34:24,928 - INFO - After Normalization*************************************** +2025-07-04 11:34:24,928 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:24,928 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:25,232 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:25,232 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:25,232 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:25,233 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:25,233 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:25,233 - INFO - After Normalization*************************************** +2025-07-04 11:34:25,233 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:25,233 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:25,521 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:25,521 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:25,521 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:25,521 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:25,521 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:25,522 - INFO - After Normalization*************************************** +2025-07-04 11:34:25,522 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:25,522 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:28,745 - INFO - Epoch 88/150 - Train Loss: 0.222914, Val Loss: 0.237586 +2025-07-04 11:34:30,986 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:31,000 - INFO - 
(device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:31,001 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:31,001 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:31,001 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:31,001 - INFO - After Normalization*************************************** +2025-07-04 11:34:31,001 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:31,001 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:31,301 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:31,301 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:31,301 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:31,301 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:31,301 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:31,301 - INFO - After Normalization*************************************** +2025-07-04 11:34:31,301 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:31,302 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:31,590 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:31,590 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:31,590 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:31,590 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:31,590 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:31,590 - INFO - After Normalization*************************************** +2025-07-04 11:34:31,591 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) 
+2025-07-04 11:34:31,591 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:32,343 - INFO - Epoch 125/150 - Train Loss: 0.093038, Val Loss: 0.086059 +2025-07-04 11:34:32,358 - INFO - New best model saved with Val Loss: 0.086059 +2025-07-04 11:34:34,826 - INFO - Epoch 89/150 - Train Loss: 0.223983, Val Loss: 0.233450 +2025-07-04 11:34:34,839 - INFO - New best model saved with Val Loss: 0.233450 +2025-07-04 11:34:37,089 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:37,103 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:37,103 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:37,103 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:37,103 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:37,103 - INFO - After Normalization*************************************** +2025-07-04 11:34:37,103 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:37,103 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:37,416 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:37,416 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:37,416 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:37,416 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:37,416 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:37,416 - INFO - After Normalization*************************************** +2025-07-04 11:34:37,416 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:37,416 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:37,705 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:34:37,705 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:37,705 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:37,705 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:37,705 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:37,705 - INFO - After Normalization*************************************** +2025-07-04 11:34:37,705 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:37,705 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:40,884 - INFO - Epoch 90/150 - Train Loss: 0.224280, Val Loss: 0.233574 +2025-07-04 11:34:43,248 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:43,261 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:43,262 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:43,262 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:43,262 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:43,262 - INFO - After Normalization*************************************** +2025-07-04 11:34:43,262 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:43,262 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:43,562 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:43,562 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:43,562 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:43,563 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:43,563 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
11:34:43,563 - INFO - After Normalization*************************************** +2025-07-04 11:34:43,563 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:43,563 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:43,854 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:43,854 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:43,855 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:43,855 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:43,855 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:43,855 - INFO - After Normalization*************************************** +2025-07-04 11:34:43,855 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:43,855 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:47,065 - INFO - Epoch 91/150 - Train Loss: 0.221463, Val Loss: 0.233713 +2025-07-04 11:34:49,305 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:49,318 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:49,318 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:49,318 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:49,318 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:49,318 - INFO - After Normalization*************************************** +2025-07-04 11:34:49,318 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:49,319 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:49,637 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:49,638 - INFO - (device(type='cpu'), 
device(type='cpu')) +2025-07-04 11:34:49,638 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:49,638 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:49,638 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:49,638 - INFO - After Normalization*************************************** +2025-07-04 11:34:49,638 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:49,638 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:49,931 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:49,931 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:49,931 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:49,932 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:49,932 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:49,932 - INFO - After Normalization*************************************** +2025-07-04 11:34:49,932 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:49,932 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:50,092 - INFO - Epoch 126/150 - Train Loss: 0.091458, Val Loss: 0.086364 +2025-07-04 11:34:53,127 - INFO - Epoch 92/150 - Train Loss: 0.224099, Val Loss: 0.235924 +2025-07-04 11:34:55,378 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:55,391 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:55,392 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:55,392 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:55,392 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:55,392 - INFO - 
After Normalization*************************************** +2025-07-04 11:34:55,392 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:55,392 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:55,705 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:55,705 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:55,705 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:55,705 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:55,705 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:55,705 - INFO - After Normalization*************************************** +2025-07-04 11:34:55,705 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:55,705 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:55,998 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:34:55,998 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:34:55,998 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:34:55,998 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:55,998 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:55,998 - INFO - After Normalization*************************************** +2025-07-04 11:34:55,998 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:55,998 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:34:59,197 - INFO - Epoch 93/150 - Train Loss: 0.224064, Val Loss: 0.235446 +2025-07-04 11:35:01,437 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:01,451 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 
11:35:01,451 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:01,451 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:01,451 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:01,451 - INFO - After Normalization*************************************** +2025-07-04 11:35:01,452 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:01,452 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:01,760 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:01,760 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:01,761 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:01,761 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:01,761 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:01,761 - INFO - After Normalization*************************************** +2025-07-04 11:35:01,761 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:01,761 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:02,053 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:02,053 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:02,053 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:02,053 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:02,053 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:02,053 - INFO - After Normalization*************************************** +2025-07-04 11:35:02,054 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:02,054 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 11:35:05,287 - INFO - Epoch 94/150 - Train Loss: 0.218898, Val Loss: 0.233914 +2025-07-04 11:35:07,544 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:07,558 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:07,558 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:07,559 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:07,559 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:07,559 - INFO - After Normalization*************************************** +2025-07-04 11:35:07,559 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:07,559 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:07,856 - INFO - Epoch 127/150 - Train Loss: 0.092800, Val Loss: 0.086594 +2025-07-04 11:35:07,861 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:07,861 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:07,861 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:07,861 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:07,862 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:07,862 - INFO - After Normalization*************************************** +2025-07-04 11:35:07,862 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:07,862 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:08,155 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:08,155 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:08,155 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:08,155 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:08,155 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:08,155 - INFO - After Normalization*************************************** +2025-07-04 11:35:08,155 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:08,156 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:11,347 - INFO - Epoch 95/150 - Train Loss: 0.219660, Val Loss: 0.235059 +2025-07-04 11:35:13,605 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:13,618 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:13,618 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:13,618 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:13,618 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:13,618 - INFO - After Normalization*************************************** +2025-07-04 11:35:13,618 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:13,618 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:13,936 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:13,949 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:13,966 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:13,975 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:13,987 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:13,996 - INFO - After Normalization*************************************** +2025-07-04 11:35:14,006 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:14,015 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:35:14,316 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:14,316 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:14,316 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:14,316 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:14,317 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:14,317 - INFO - After Normalization*************************************** +2025-07-04 11:35:14,317 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:14,317 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:17,489 - INFO - Epoch 96/150 - Train Loss: 0.219717, Val Loss: 0.233484 +2025-07-04 11:35:19,749 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:19,763 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:19,763 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:19,763 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:19,763 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:19,763 - INFO - After Normalization*************************************** +2025-07-04 11:35:19,763 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:19,763 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:20,075 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:20,075 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:20,075 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:20,075 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:20,075 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 11:35:20,075 - INFO - After Normalization*************************************** +2025-07-04 11:35:20,075 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:20,075 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:20,364 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:20,364 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:20,364 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:20,364 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:20,364 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:20,364 - INFO - After Normalization*************************************** +2025-07-04 11:35:20,364 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:20,364 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:23,569 - INFO - Epoch 97/150 - Train Loss: 0.220275, Val Loss: 0.234563 +2025-07-04 11:35:25,590 - INFO - Epoch 128/150 - Train Loss: 0.093453, Val Loss: 0.086473 +2025-07-04 11:35:25,813 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:25,827 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:25,827 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:25,827 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:25,827 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:25,828 - INFO - After Normalization*************************************** +2025-07-04 11:35:25,828 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:25,828 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
11:35:26,136 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:26,136 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:26,136 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:26,136 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:26,136 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:26,137 - INFO - After Normalization*************************************** +2025-07-04 11:35:26,137 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:26,137 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:26,425 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:26,426 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:26,426 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:26,426 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:26,426 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:26,426 - INFO - After Normalization*************************************** +2025-07-04 11:35:26,426 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:26,426 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:29,637 - INFO - Epoch 98/150 - Train Loss: 0.218811, Val Loss: 0.234070 +2025-07-04 11:35:31,893 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:31,906 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:31,906 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:31,907 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:31,907 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 11:35:31,907 - INFO - After Normalization*************************************** +2025-07-04 11:35:31,907 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:31,907 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:32,211 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:32,211 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:32,212 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:32,212 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:32,212 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:32,212 - INFO - After Normalization*************************************** +2025-07-04 11:35:32,212 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:32,212 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:32,500 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:32,500 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:32,501 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:32,501 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:32,501 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:32,501 - INFO - After Normalization*************************************** +2025-07-04 11:35:32,501 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:32,501 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:35,697 - INFO - Epoch 99/150 - Train Loss: 0.216731, Val Loss: 0.233418 +2025-07-04 11:35:35,712 - INFO - New best model saved with Val Loss: 0.233418 +2025-07-04 11:35:37,957 - INFO - 
before .to(local_rank)*************************************** +2025-07-04 11:35:37,970 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:37,971 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:37,971 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:37,971 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:37,971 - INFO - After Normalization*************************************** +2025-07-04 11:35:37,971 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:37,971 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:38,291 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:38,291 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:38,291 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:38,291 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:38,291 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:38,291 - INFO - After Normalization*************************************** +2025-07-04 11:35:38,291 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:38,291 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:38,580 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:38,580 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:38,580 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:38,580 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:38,580 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:38,580 - INFO - After Normalization*************************************** 
+2025-07-04 11:35:38,580 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:38,580 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:41,807 - INFO - Epoch 100/150 - Train Loss: 0.215245, Val Loss: 0.231929 +2025-07-04 11:35:41,821 - INFO - New best model saved with Val Loss: 0.231929 +2025-07-04 11:35:43,346 - INFO - Epoch 129/150 - Train Loss: 0.092883, Val Loss: 0.086414 +2025-07-04 11:35:44,195 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:44,209 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:44,209 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:44,209 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:44,209 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:44,209 - INFO - After Normalization*************************************** +2025-07-04 11:35:44,209 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:44,210 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:44,526 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:44,526 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:44,527 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:44,527 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:44,527 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:44,527 - INFO - After Normalization*************************************** +2025-07-04 11:35:44,527 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:44,527 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:44,816 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:35:44,816 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:44,817 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:44,817 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:44,817 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:44,817 - INFO - After Normalization*************************************** +2025-07-04 11:35:44,817 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:44,817 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:47,997 - INFO - Epoch 101/150 - Train Loss: 0.220793, Val Loss: 0.232915 +2025-07-04 11:35:50,264 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:50,277 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:50,277 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:50,278 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:50,278 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:50,278 - INFO - After Normalization*************************************** +2025-07-04 11:35:50,278 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:50,278 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:50,588 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:50,588 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:50,589 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:50,589 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:50,589 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
11:35:50,589 - INFO - After Normalization*************************************** +2025-07-04 11:35:50,589 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:50,589 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:50,877 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:50,877 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:50,877 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:50,878 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:50,878 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:50,878 - INFO - After Normalization*************************************** +2025-07-04 11:35:50,878 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:50,878 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:54,067 - INFO - Epoch 102/150 - Train Loss: 0.215615, Val Loss: 0.231711 +2025-07-04 11:35:54,082 - INFO - New best model saved with Val Loss: 0.231711 +2025-07-04 11:35:56,342 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:56,356 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:56,356 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:56,356 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:56,356 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:56,356 - INFO - After Normalization*************************************** +2025-07-04 11:35:56,356 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:56,356 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:56,662 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:35:56,662 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:56,662 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:56,662 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:56,662 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:56,663 - INFO - After Normalization*************************************** +2025-07-04 11:35:56,663 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:56,663 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:56,952 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:35:56,952 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:35:56,952 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:35:56,952 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:56,952 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:56,952 - INFO - After Normalization*************************************** +2025-07-04 11:35:56,952 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:35:56,952 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:00,154 - INFO - Epoch 103/150 - Train Loss: 0.215830, Val Loss: 0.233838 +2025-07-04 11:36:01,069 - INFO - Epoch 130/150 - Train Loss: 0.093339, Val Loss: 0.086173 +2025-07-04 11:36:02,403 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:02,417 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:02,417 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:02,418 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
11:36:02,418 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:02,418 - INFO - After Normalization*************************************** +2025-07-04 11:36:02,418 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:02,418 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:02,720 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:02,720 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:02,721 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:02,721 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:02,721 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:02,721 - INFO - After Normalization*************************************** +2025-07-04 11:36:02,721 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:02,721 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:03,009 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:03,009 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:03,010 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:03,010 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:03,010 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:03,010 - INFO - After Normalization*************************************** +2025-07-04 11:36:03,010 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:03,010 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:06,229 - INFO - Epoch 104/150 - Train Loss: 0.219100, Val Loss: 0.238959 +2025-07-04 11:36:08,479 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:36:08,492 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:08,493 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:08,493 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:08,493 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:08,493 - INFO - After Normalization*************************************** +2025-07-04 11:36:08,493 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:08,493 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:08,803 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:08,803 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:08,803 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:08,803 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:08,803 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:08,803 - INFO - After Normalization*************************************** +2025-07-04 11:36:08,803 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:08,803 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:09,092 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:09,092 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:09,092 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:09,092 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:09,092 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:09,093 - INFO - After Normalization*************************************** +2025-07-04 
11:36:09,093 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:09,093 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:12,290 - INFO - Epoch 105/150 - Train Loss: 0.217095, Val Loss: 0.235791 +2025-07-04 11:36:14,555 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:14,568 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:14,569 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:14,569 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:14,569 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:14,569 - INFO - After Normalization*************************************** +2025-07-04 11:36:14,569 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:14,569 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:14,884 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:14,884 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:14,885 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:14,885 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:14,885 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:14,885 - INFO - After Normalization*************************************** +2025-07-04 11:36:14,885 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:14,885 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:15,174 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:15,174 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:15,174 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 11:36:15,174 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:15,175 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:15,175 - INFO - After Normalization*************************************** +2025-07-04 11:36:15,175 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:15,175 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:18,382 - INFO - Epoch 106/150 - Train Loss: 0.219045, Val Loss: 0.238134 +2025-07-04 11:36:18,921 - INFO - Epoch 131/150 - Train Loss: 0.093019, Val Loss: 0.086383 +2025-07-04 11:36:20,668 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:20,681 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:20,682 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:20,682 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:20,682 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:20,682 - INFO - After Normalization*************************************** +2025-07-04 11:36:20,682 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:20,682 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:20,987 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:20,987 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:20,987 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:20,987 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:20,987 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:20,987 - INFO - After Normalization*************************************** 
+2025-07-04 11:36:20,987 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:20,987 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:21,276 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:21,276 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:21,277 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:21,277 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:21,277 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:21,277 - INFO - After Normalization*************************************** +2025-07-04 11:36:21,277 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:21,277 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:24,485 - INFO - Epoch 107/150 - Train Loss: 0.217866, Val Loss: 0.237858 +2025-07-04 11:36:26,757 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:26,770 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:26,771 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:26,771 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:26,771 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:26,771 - INFO - After Normalization*************************************** +2025-07-04 11:36:26,771 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:26,771 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:27,073 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:27,073 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:27,074 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 11:36:27,074 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:27,074 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:27,074 - INFO - After Normalization*************************************** +2025-07-04 11:36:27,074 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:27,074 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:27,362 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:27,362 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:27,362 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:27,362 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:27,362 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:27,363 - INFO - After Normalization*************************************** +2025-07-04 11:36:27,363 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:27,363 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:30,595 - INFO - Epoch 108/150 - Train Loss: 0.213142, Val Loss: 0.232590 +2025-07-04 11:36:32,851 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:32,864 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:32,865 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:32,865 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:32,865 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:32,865 - INFO - After Normalization*************************************** +2025-07-04 11:36:32,865 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:36:32,865 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:33,178 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:33,178 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:33,179 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:33,179 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:33,179 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:33,179 - INFO - After Normalization*************************************** +2025-07-04 11:36:33,179 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:33,179 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:33,468 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:33,468 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:33,468 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:33,468 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:33,468 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:33,468 - INFO - After Normalization*************************************** +2025-07-04 11:36:33,468 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:33,468 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:36,675 - INFO - Epoch 132/150 - Train Loss: 0.093502, Val Loss: 0.086444 +2025-07-04 11:36:36,725 - INFO - Epoch 109/150 - Train Loss: 0.215017, Val Loss: 0.234218 +2025-07-04 11:36:38,983 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:38,996 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:38,997 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 11:36:38,997 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:38,997 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:38,997 - INFO - After Normalization*************************************** +2025-07-04 11:36:38,997 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:38,997 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:39,299 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:39,299 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:39,299 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:39,299 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:39,299 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:39,300 - INFO - After Normalization*************************************** +2025-07-04 11:36:39,300 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:39,300 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:39,592 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:39,592 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:39,592 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:39,592 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:39,592 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:39,592 - INFO - After Normalization*************************************** +2025-07-04 11:36:39,592 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:39,592 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:36:42,789 - INFO - Epoch 110/150 - Train Loss: 0.216315, Val Loss: 0.234832 +2025-07-04 11:36:46,653 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:46,667 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:46,667 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:46,667 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:46,667 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:46,667 - INFO - After Normalization*************************************** +2025-07-04 11:36:46,667 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:46,667 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:46,974 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:46,974 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:46,974 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:46,974 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:46,974 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:46,974 - INFO - After Normalization*************************************** +2025-07-04 11:36:46,974 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:46,974 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:47,263 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:47,263 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:47,263 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:47,263 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:47,263 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 11:36:47,263 - INFO - After Normalization*************************************** +2025-07-04 11:36:47,264 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:47,264 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:50,487 - INFO - Epoch 111/150 - Train Loss: 0.212657, Val Loss: 0.228432 +2025-07-04 11:36:50,502 - INFO - New best model saved with Val Loss: 0.228432 +2025-07-04 11:36:52,785 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:52,798 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:52,799 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:52,799 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:52,799 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:52,799 - INFO - After Normalization*************************************** +2025-07-04 11:36:52,799 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:52,799 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:53,107 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:53,107 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:53,107 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:53,107 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:53,107 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:53,107 - INFO - After Normalization*************************************** +2025-07-04 11:36:53,107 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:53,107 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:53,396 - 
INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:53,396 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:53,396 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:53,396 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:53,396 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:53,397 - INFO - After Normalization*************************************** +2025-07-04 11:36:53,397 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:53,397 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:54,482 - INFO - Epoch 133/150 - Train Loss: 0.093330, Val Loss: 0.086222 +2025-07-04 11:36:56,599 - INFO - Epoch 112/150 - Train Loss: 0.213089, Val Loss: 0.229737 +2025-07-04 11:36:58,853 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:58,867 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:58,867 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:58,867 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:58,867 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:58,867 - INFO - After Normalization*************************************** +2025-07-04 11:36:58,868 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:58,868 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:59,173 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:59,173 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:59,173 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:59,173 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) 
+2025-07-04 11:36:59,173 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:59,173 - INFO - After Normalization*************************************** +2025-07-04 11:36:59,173 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:59,173 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:59,462 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:36:59,462 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:36:59,462 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:36:59,462 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:59,462 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:59,462 - INFO - After Normalization*************************************** +2025-07-04 11:36:59,462 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:36:59,462 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:02,671 - INFO - Epoch 113/150 - Train Loss: 0.216253, Val Loss: 0.234988 +2025-07-04 11:37:04,924 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:04,937 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:04,937 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:04,937 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:04,937 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:04,937 - INFO - After Normalization*************************************** +2025-07-04 11:37:04,937 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:04,937 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:05,238 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:37:05,238 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:05,239 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:05,239 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:05,239 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:05,239 - INFO - After Normalization*************************************** +2025-07-04 11:37:05,239 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:05,239 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:05,527 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:05,527 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:05,528 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:05,528 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:05,528 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:05,528 - INFO - After Normalization*************************************** +2025-07-04 11:37:05,528 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:05,528 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:08,753 - INFO - Epoch 114/150 - Train Loss: 0.210370, Val Loss: 0.231040 +2025-07-04 11:37:11,020 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:11,034 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:11,034 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:11,034 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:11,035 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
11:37:11,035 - INFO - After Normalization*************************************** +2025-07-04 11:37:11,035 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:11,035 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:11,349 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:11,349 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:11,349 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:11,349 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:11,350 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:11,350 - INFO - After Normalization*************************************** +2025-07-04 11:37:11,350 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:11,350 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:11,638 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:11,638 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:11,639 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:11,639 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:11,639 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:11,639 - INFO - After Normalization*************************************** +2025-07-04 11:37:11,639 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:11,639 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:12,236 - INFO - Epoch 134/150 - Train Loss: 0.093427, Val Loss: 0.086623 +2025-07-04 11:37:14,950 - INFO - Epoch 115/150 - Train Loss: 0.215257, Val Loss: 0.229911 +2025-07-04 11:37:17,186 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:37:17,200 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:17,200 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:17,200 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:17,200 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:17,200 - INFO - After Normalization*************************************** +2025-07-04 11:37:17,200 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:17,200 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:17,500 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:17,500 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:17,500 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:17,500 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:17,500 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:17,501 - INFO - After Normalization*************************************** +2025-07-04 11:37:17,501 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:17,501 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:17,800 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:17,800 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:17,801 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:17,801 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:17,801 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:17,801 - INFO - After Normalization*************************************** +2025-07-04 
11:37:17,801 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:17,801 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:20,991 - INFO - Epoch 116/150 - Train Loss: 0.210573, Val Loss: 0.230565 +2025-07-04 11:37:23,260 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:23,273 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:23,274 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:23,274 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:23,274 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:23,274 - INFO - After Normalization*************************************** +2025-07-04 11:37:23,274 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:23,274 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:23,589 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:23,589 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:23,590 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:23,590 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:23,590 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:23,590 - INFO - After Normalization*************************************** +2025-07-04 11:37:23,590 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:23,590 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:23,881 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:23,881 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:23,882 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 11:37:23,882 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:23,882 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:23,882 - INFO - After Normalization*************************************** +2025-07-04 11:37:23,882 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:23,882 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:27,088 - INFO - Epoch 117/150 - Train Loss: 0.214040, Val Loss: 0.228656 +2025-07-04 11:37:29,335 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:29,358 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:29,358 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:29,358 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:29,358 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:29,358 - INFO - After Normalization*************************************** +2025-07-04 11:37:29,358 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:29,358 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:29,667 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:29,667 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:29,667 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:29,667 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:29,667 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:29,667 - INFO - After Normalization*************************************** +2025-07-04 11:37:29,667 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 11:37:29,667 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:29,960 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:29,960 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:29,961 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:29,961 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:29,961 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:29,961 - INFO - After Normalization*************************************** +2025-07-04 11:37:29,961 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:29,961 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:29,974 - INFO - Epoch 135/150 - Train Loss: 0.093817, Val Loss: 0.086278 +2025-07-04 11:37:33,163 - INFO - Epoch 118/150 - Train Loss: 0.211064, Val Loss: 0.229791 +2025-07-04 11:37:35,403 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:35,425 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:35,425 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:35,425 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:35,425 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:35,426 - INFO - After Normalization*************************************** +2025-07-04 11:37:35,426 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:35,426 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:35,731 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:35,731 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:35,731 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 11:37:35,731 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:35,731 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:35,731 - INFO - After Normalization*************************************** +2025-07-04 11:37:35,731 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:35,732 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:36,023 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:36,023 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:36,024 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:36,024 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:36,024 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:36,024 - INFO - After Normalization*************************************** +2025-07-04 11:37:36,024 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:36,024 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:39,223 - INFO - Epoch 119/150 - Train Loss: 0.218802, Val Loss: 0.228340 +2025-07-04 11:37:39,237 - INFO - New best model saved with Val Loss: 0.228340 +2025-07-04 11:37:41,474 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:41,487 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:41,487 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:41,488 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:41,488 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:41,488 - INFO - After Normalization*************************************** +2025-07-04 
11:37:41,488 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:41,488 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:41,790 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:41,790 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:41,790 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:41,790 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:41,790 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:41,790 - INFO - After Normalization*************************************** +2025-07-04 11:37:41,790 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:41,790 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:42,082 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:42,082 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:42,082 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:42,082 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:42,082 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:42,082 - INFO - After Normalization*************************************** +2025-07-04 11:37:42,082 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:42,082 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:45,301 - INFO - Epoch 120/150 - Train Loss: 0.211176, Val Loss: 0.227035 +2025-07-04 11:37:45,315 - INFO - New best model saved with Val Loss: 0.227035 +2025-07-04 11:37:47,671 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:47,685 - INFO - (device(type='cpu'), device(type='cpu')) 
+2025-07-04 11:37:47,685 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:47,686 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:47,686 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:47,686 - INFO - After Normalization*************************************** +2025-07-04 11:37:47,686 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:47,686 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:47,735 - INFO - Epoch 136/150 - Train Loss: 0.093156, Val Loss: 0.086409 +2025-07-04 11:37:47,995 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:47,995 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:47,995 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:47,995 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:47,995 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:47,996 - INFO - After Normalization*************************************** +2025-07-04 11:37:47,996 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:47,996 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:48,285 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:48,285 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:48,285 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:48,285 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:48,285 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:48,285 - INFO - After Normalization*************************************** +2025-07-04 11:37:48,285 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:48,286 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:51,490 - INFO - Epoch 121/150 - Train Loss: 0.209712, Val Loss: 0.227454 +2025-07-04 11:37:53,752 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:53,766 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:53,766 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:53,766 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:53,766 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:53,767 - INFO - After Normalization*************************************** +2025-07-04 11:37:53,767 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:53,767 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:54,069 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:54,069 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:54,070 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:54,070 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:54,070 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:54,070 - INFO - After Normalization*************************************** +2025-07-04 11:37:54,070 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:54,070 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:54,358 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:54,358 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:54,358 - INFO - After .to(local_rank)*************************************** 
+2025-07-04 11:37:54,358 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:54,359 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:54,359 - INFO - After Normalization*************************************** +2025-07-04 11:37:54,359 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:54,359 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:57,532 - INFO - Epoch 122/150 - Train Loss: 0.210517, Val Loss: 0.229289 +2025-07-04 11:37:59,789 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:37:59,803 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:37:59,803 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:37:59,803 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:59,804 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:59,804 - INFO - After Normalization*************************************** +2025-07-04 11:37:59,804 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:37:59,804 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:00,111 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:00,111 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:00,112 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:00,112 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:00,112 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:00,112 - INFO - After Normalization*************************************** +2025-07-04 11:38:00,112 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:00,112 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:00,400 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:00,400 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:00,401 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:00,401 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:00,401 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:00,401 - INFO - After Normalization*************************************** +2025-07-04 11:38:00,401 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:00,401 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:03,582 - INFO - Epoch 123/150 - Train Loss: 0.206994, Val Loss: 0.232915 +2025-07-04 11:38:05,487 - INFO - Epoch 137/150 - Train Loss: 0.092966, Val Loss: 0.086330 +2025-07-04 11:38:05,834 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:05,847 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:05,848 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:05,848 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:05,848 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:05,848 - INFO - After Normalization*************************************** +2025-07-04 11:38:05,848 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:05,848 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:06,191 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:06,191 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:06,191 - INFO - After .to(local_rank)*************************************** +2025-07-04 
11:38:06,191 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:06,192 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:06,192 - INFO - After Normalization*************************************** +2025-07-04 11:38:06,192 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:06,192 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:06,481 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:06,481 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:06,481 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:06,482 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:06,482 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:06,482 - INFO - After Normalization*************************************** +2025-07-04 11:38:06,482 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:06,482 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:09,683 - INFO - Epoch 124/150 - Train Loss: 0.210682, Val Loss: 0.232058 +2025-07-04 11:38:11,920 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:11,934 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:11,935 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:11,935 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:11,935 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:11,935 - INFO - After Normalization*************************************** +2025-07-04 11:38:11,935 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:11,935 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 11:38:12,250 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:12,251 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:12,251 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:12,251 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:12,251 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:12,251 - INFO - After Normalization*************************************** +2025-07-04 11:38:12,251 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:12,251 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:12,540 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:12,540 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:12,540 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:12,540 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:12,540 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:12,540 - INFO - After Normalization*************************************** +2025-07-04 11:38:12,540 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:12,540 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:18,806 - INFO - Epoch 125/150 - Train Loss: 0.210332, Val Loss: 0.235467 +2025-07-04 11:38:21,072 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:21,086 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:21,086 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:21,086 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:21,086 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:21,086 - INFO - After Normalization*************************************** +2025-07-04 11:38:21,086 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:21,086 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:21,387 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:21,387 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:21,388 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:21,388 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:21,388 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:21,388 - INFO - After Normalization*************************************** +2025-07-04 11:38:21,388 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:21,388 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:21,677 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:21,677 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:21,677 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:21,677 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:21,677 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:21,677 - INFO - After Normalization*************************************** +2025-07-04 11:38:21,678 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:21,678 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:23,315 - INFO - Epoch 138/150 - Train Loss: 0.093484, Val Loss: 0.086202 +2025-07-04 11:38:24,920 - INFO - Epoch 126/150 - Train Loss: 0.208929, Val Loss: 
0.232208 +2025-07-04 11:38:27,157 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:27,170 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:27,171 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:27,171 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:27,171 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:27,171 - INFO - After Normalization*************************************** +2025-07-04 11:38:27,171 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:27,171 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:27,488 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:27,488 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:27,489 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:27,489 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:27,489 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:27,489 - INFO - After Normalization*************************************** +2025-07-04 11:38:27,489 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:27,489 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:27,777 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:27,777 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:27,778 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:27,778 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:27,778 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:27,778 - INFO - After 
Normalization*************************************** +2025-07-04 11:38:27,778 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:27,778 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:30,973 - INFO - Epoch 127/150 - Train Loss: 0.206713, Val Loss: 0.226560 +2025-07-04 11:38:30,989 - INFO - New best model saved with Val Loss: 0.226560 +2025-07-04 11:38:33,240 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:33,253 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:33,254 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:33,254 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:33,254 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:33,254 - INFO - After Normalization*************************************** +2025-07-04 11:38:33,254 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:33,254 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:33,563 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:33,563 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:33,564 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:33,564 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:33,564 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:33,564 - INFO - After Normalization*************************************** +2025-07-04 11:38:33,564 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:33,564 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:33,852 - INFO - before .to(local_rank)*************************************** +2025-07-04 
11:38:33,852 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:33,853 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:33,853 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:33,853 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:33,853 - INFO - After Normalization*************************************** +2025-07-04 11:38:33,853 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:33,853 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:37,071 - INFO - Epoch 128/150 - Train Loss: 0.209242, Val Loss: 0.223407 +2025-07-04 11:38:37,085 - INFO - New best model saved with Val Loss: 0.223407 +2025-07-04 11:38:39,353 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:39,367 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:39,367 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:39,367 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:39,367 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:39,367 - INFO - After Normalization*************************************** +2025-07-04 11:38:39,367 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:39,367 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:39,669 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:39,670 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:39,670 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:39,670 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:39,670 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) 
+2025-07-04 11:38:39,670 - INFO - After Normalization*************************************** +2025-07-04 11:38:39,670 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:39,670 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:39,959 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:39,959 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:39,959 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:39,959 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:39,959 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:39,959 - INFO - After Normalization*************************************** +2025-07-04 11:38:39,959 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:39,959 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:41,090 - INFO - Epoch 139/150 - Train Loss: 0.092633, Val Loss: 0.086555 +2025-07-04 11:38:43,171 - INFO - Epoch 129/150 - Train Loss: 0.207208, Val Loss: 0.222295 +2025-07-04 11:38:43,185 - INFO - New best model saved with Val Loss: 0.222295 +2025-07-04 11:38:45,438 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:45,452 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:45,452 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:45,452 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:45,452 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:45,452 - INFO - After Normalization*************************************** +2025-07-04 11:38:45,452 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:45,452 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 11:38:45,769 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:45,769 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:45,770 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:45,770 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:45,770 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:45,770 - INFO - After Normalization*************************************** +2025-07-04 11:38:45,770 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:45,770 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:46,058 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:46,058 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:46,059 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:46,059 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:46,059 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:46,059 - INFO - After Normalization*************************************** +2025-07-04 11:38:46,059 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:46,059 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:49,253 - INFO - Epoch 130/150 - Train Loss: 0.206944, Val Loss: 0.225768 +2025-07-04 11:38:51,623 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:51,639 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:51,639 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:51,639 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:51,639 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:51,639 - INFO - After Normalization*************************************** +2025-07-04 11:38:51,639 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:51,639 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:51,939 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:51,939 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:51,940 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:51,940 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:51,940 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:51,940 - INFO - After Normalization*************************************** +2025-07-04 11:38:51,940 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:51,940 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:52,231 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:52,231 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:52,232 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:52,232 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:52,232 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:52,232 - INFO - After Normalization*************************************** +2025-07-04 11:38:52,232 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:52,232 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:55,448 - INFO - Epoch 131/150 - Train Loss: 0.205182, Val Loss: 0.224567 +2025-07-04 11:38:57,670 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 11:38:57,683 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:57,684 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:57,684 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:57,684 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:57,684 - INFO - After Normalization*************************************** +2025-07-04 11:38:57,684 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:57,684 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:57,995 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:57,995 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:57,995 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:57,995 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:57,995 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:57,995 - INFO - After Normalization*************************************** +2025-07-04 11:38:57,995 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:57,995 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:58,288 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:38:58,288 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:38:58,288 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:38:58,288 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:58,288 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:58,288 - INFO - After Normalization*************************************** +2025-07-04 
11:38:58,288 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:58,288 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:38:58,900 - INFO - Epoch 140/150 - Train Loss: 0.092953, Val Loss: 0.086549 +2025-07-04 11:39:01,522 - INFO - Epoch 132/150 - Train Loss: 0.207266, Val Loss: 0.224274 +2025-07-04 11:39:03,780 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:03,793 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:03,794 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:03,794 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:03,794 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:03,794 - INFO - After Normalization*************************************** +2025-07-04 11:39:03,794 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:03,794 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:04,103 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:04,103 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:04,104 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:04,104 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:04,104 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:04,104 - INFO - After Normalization*************************************** +2025-07-04 11:39:04,104 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:04,104 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:04,397 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:04,397 - INFO - (device(type='cpu'), 
device(type='cpu')) +2025-07-04 11:39:04,397 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:04,397 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:04,397 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:04,398 - INFO - After Normalization*************************************** +2025-07-04 11:39:04,398 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:04,398 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:07,594 - INFO - Epoch 133/150 - Train Loss: 0.204924, Val Loss: 0.225970 +2025-07-04 11:39:09,840 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:09,854 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:09,855 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:09,855 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:09,855 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:09,855 - INFO - After Normalization*************************************** +2025-07-04 11:39:09,855 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:09,855 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:10,162 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:10,163 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:10,163 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:10,163 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:10,163 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:10,163 - INFO - After Normalization*************************************** +2025-07-04 11:39:10,163 - INFO 
- (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:10,163 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:10,455 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:10,455 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:10,455 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:10,455 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:10,455 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:10,455 - INFO - After Normalization*************************************** +2025-07-04 11:39:10,455 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:10,455 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:13,668 - INFO - Epoch 134/150 - Train Loss: 0.204168, Val Loss: 0.226186 +2025-07-04 11:39:15,897 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:15,910 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:15,911 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:15,911 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:15,911 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:15,911 - INFO - After Normalization*************************************** +2025-07-04 11:39:15,911 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:15,911 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:16,216 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:16,216 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:16,216 - INFO - After .to(local_rank)*************************************** 
+2025-07-04 11:39:16,216 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:16,216 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:16,217 - INFO - After Normalization*************************************** +2025-07-04 11:39:16,217 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:16,217 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:16,509 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:16,509 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:16,509 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:16,509 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:16,509 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:16,509 - INFO - After Normalization*************************************** +2025-07-04 11:39:16,510 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:16,510 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:16,744 - INFO - Epoch 141/150 - Train Loss: 0.092855, Val Loss: 0.086349 +2025-07-04 11:39:19,728 - INFO - Epoch 135/150 - Train Loss: 0.205579, Val Loss: 0.229163 +2025-07-04 11:39:21,978 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:21,991 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:21,992 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:21,992 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:21,992 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:21,992 - INFO - After Normalization*************************************** +2025-07-04 11:39:21,992 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 11:39:21,992 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:22,292 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:22,292 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:22,292 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:22,292 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:22,292 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:22,292 - INFO - After Normalization*************************************** +2025-07-04 11:39:22,292 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:22,292 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:22,584 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:22,585 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:22,585 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:22,585 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:22,585 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:22,585 - INFO - After Normalization*************************************** +2025-07-04 11:39:22,585 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:22,585 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:25,771 - INFO - Epoch 136/150 - Train Loss: 0.206300, Val Loss: 0.230606 +2025-07-04 11:39:28,012 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:28,026 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:28,026 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:28,026 - 
INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:28,026 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:28,026 - INFO - After Normalization*************************************** +2025-07-04 11:39:28,026 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:28,026 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:28,328 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:28,328 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:28,328 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:28,328 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:28,328 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:28,328 - INFO - After Normalization*************************************** +2025-07-04 11:39:28,328 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:28,328 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:28,620 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:28,620 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:28,621 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:28,621 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:28,621 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:28,621 - INFO - After Normalization*************************************** +2025-07-04 11:39:28,621 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:28,621 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:31,845 - INFO - Epoch 137/150 - Train Loss: 0.204792, Val 
Loss: 0.228642 +2025-07-04 11:39:34,065 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:34,079 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:34,079 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:34,080 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:34,080 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:34,080 - INFO - After Normalization*************************************** +2025-07-04 11:39:34,080 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:34,080 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:34,402 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:34,402 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:34,402 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:34,402 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:34,402 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:34,402 - INFO - After Normalization*************************************** +2025-07-04 11:39:34,402 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:34,402 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:34,495 - INFO - Epoch 142/150 - Train Loss: 0.093116, Val Loss: 0.086281 +2025-07-04 11:39:34,692 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:34,692 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:34,692 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:34,692 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:34,692 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 11:39:34,692 - INFO - After Normalization*************************************** +2025-07-04 11:39:34,692 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:34,692 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:37,883 - INFO - Epoch 138/150 - Train Loss: 0.204796, Val Loss: 0.232180 +2025-07-04 11:39:40,129 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:40,141 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:40,142 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:40,142 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:40,142 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:40,142 - INFO - After Normalization*************************************** +2025-07-04 11:39:40,142 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:40,142 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:40,459 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:40,459 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:40,459 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:40,459 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:40,459 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:40,459 - INFO - After Normalization*************************************** +2025-07-04 11:39:40,459 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:40,459 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:40,748 - INFO - before .to(local_rank)*************************************** +2025-07-04 
11:39:40,748 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:40,748 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:40,748 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:40,748 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:40,748 - INFO - After Normalization*************************************** +2025-07-04 11:39:40,748 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:40,748 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:43,944 - INFO - Epoch 139/150 - Train Loss: 0.205599, Val Loss: 0.224674 +2025-07-04 11:39:46,177 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:46,190 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:46,190 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:46,190 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:46,190 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:46,191 - INFO - After Normalization*************************************** +2025-07-04 11:39:46,191 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:46,191 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:46,504 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:46,504 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:46,504 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:46,505 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:46,505 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:46,505 - INFO - After 
Normalization*************************************** +2025-07-04 11:39:46,505 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:46,505 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:46,793 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:46,793 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:46,793 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:46,793 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:46,793 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:46,794 - INFO - After Normalization*************************************** +2025-07-04 11:39:46,794 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:46,794 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:50,000 - INFO - Epoch 140/150 - Train Loss: 0.204527, Val Loss: 0.222321 +2025-07-04 11:39:52,236 - INFO - Epoch 143/150 - Train Loss: 0.093006, Val Loss: 0.086377 +2025-07-04 11:39:52,356 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:52,370 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:52,370 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:52,370 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:52,370 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:52,370 - INFO - After Normalization*************************************** +2025-07-04 11:39:52,370 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:52,370 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:52,692 - INFO - before .to(local_rank)*************************************** 
+2025-07-04 11:39:52,692 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:52,692 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:52,692 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:52,692 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:52,692 - INFO - After Normalization*************************************** +2025-07-04 11:39:52,692 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:52,692 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:52,982 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:52,982 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:52,982 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:52,982 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:52,982 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:52,982 - INFO - After Normalization*************************************** +2025-07-04 11:39:52,982 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:52,982 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:56,191 - INFO - Epoch 141/150 - Train Loss: 0.204573, Val Loss: 0.222382 +2025-07-04 11:39:58,429 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:58,443 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:58,443 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:58,443 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:58,443 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:58,444 - INFO - After 
Normalization*************************************** +2025-07-04 11:39:58,444 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:58,444 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:58,759 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:58,759 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:58,759 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:58,759 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:58,759 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:58,760 - INFO - After Normalization*************************************** +2025-07-04 11:39:58,760 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:58,760 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:59,048 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:39:59,048 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:39:59,048 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:39:59,048 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:59,048 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:59,049 - INFO - After Normalization*************************************** +2025-07-04 11:39:59,049 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:39:59,049 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:02,244 - INFO - Epoch 142/150 - Train Loss: 0.202772, Val Loss: 0.222352 +2025-07-04 11:40:04,506 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:04,519 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 
11:40:04,520 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:04,520 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:04,520 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:04,520 - INFO - After Normalization*************************************** +2025-07-04 11:40:04,520 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:04,520 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:04,830 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:04,830 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:04,831 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:04,831 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:04,831 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:04,831 - INFO - After Normalization*************************************** +2025-07-04 11:40:04,831 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:04,831 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:05,119 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:05,119 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:05,120 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:05,120 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:05,120 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:05,120 - INFO - After Normalization*************************************** +2025-07-04 11:40:05,120 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:05,120 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 11:40:08,337 - INFO - Epoch 143/150 - Train Loss: 0.201713, Val Loss: 0.222547 +2025-07-04 11:40:09,976 - INFO - Epoch 144/150 - Train Loss: 0.092932, Val Loss: 0.086349 +2025-07-04 11:40:10,595 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:10,607 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:10,608 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:10,608 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:10,608 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:10,608 - INFO - After Normalization*************************************** +2025-07-04 11:40:10,608 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:10,608 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:10,909 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:10,909 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:10,910 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:10,910 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:10,910 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:10,910 - INFO - After Normalization*************************************** +2025-07-04 11:40:10,910 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:10,910 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:11,199 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:11,199 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:11,200 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:11,200 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:11,200 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:11,200 - INFO - After Normalization*************************************** +2025-07-04 11:40:11,200 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:11,200 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:14,403 - INFO - Epoch 144/150 - Train Loss: 0.205784, Val Loss: 0.221948 +2025-07-04 11:40:14,419 - INFO - New best model saved with Val Loss: 0.221948 +2025-07-04 11:40:16,663 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:16,677 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:16,677 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:16,677 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:16,677 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:16,677 - INFO - After Normalization*************************************** +2025-07-04 11:40:16,677 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:16,677 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:16,996 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:16,996 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:16,996 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:16,996 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:16,997 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:16,997 - INFO - After Normalization*************************************** +2025-07-04 11:40:16,997 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) 
+2025-07-04 11:40:16,997 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:17,285 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:17,285 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:17,286 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:17,286 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:17,286 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:17,286 - INFO - After Normalization*************************************** +2025-07-04 11:40:17,286 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:17,286 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:20,484 - INFO - Epoch 145/150 - Train Loss: 0.204412, Val Loss: 0.221852 +2025-07-04 11:40:20,497 - INFO - New best model saved with Val Loss: 0.221852 +2025-07-04 11:40:22,746 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:22,759 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:22,760 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:22,760 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:22,760 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:22,760 - INFO - After Normalization*************************************** +2025-07-04 11:40:22,760 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:22,760 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:23,071 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:23,071 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:23,071 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 11:40:23,071 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:23,071 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:23,072 - INFO - After Normalization*************************************** +2025-07-04 11:40:23,072 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:23,072 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:23,360 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:23,360 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:23,360 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:23,360 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:23,360 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:23,360 - INFO - After Normalization*************************************** +2025-07-04 11:40:23,360 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:23,361 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:26,573 - INFO - Epoch 146/150 - Train Loss: 0.205469, Val Loss: 0.220326 +2025-07-04 11:40:26,586 - INFO - New best model saved with Val Loss: 0.220326 +2025-07-04 11:40:27,726 - INFO - Epoch 145/150 - Train Loss: 0.092390, Val Loss: 0.086420 +2025-07-04 11:40:28,868 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:28,883 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:28,883 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:28,883 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:28,883 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
11:40:28,883 - INFO - After Normalization*************************************** +2025-07-04 11:40:28,883 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:28,883 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:29,185 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:29,186 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:29,186 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:29,186 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:29,186 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:29,186 - INFO - After Normalization*************************************** +2025-07-04 11:40:29,186 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:29,186 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:29,475 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:29,475 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:29,475 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:29,475 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:29,475 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:29,475 - INFO - After Normalization*************************************** +2025-07-04 11:40:29,475 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:29,476 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:32,703 - INFO - Epoch 147/150 - Train Loss: 0.204432, Val Loss: 0.220514 +2025-07-04 11:40:34,967 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:34,980 - INFO - (device(type='cpu'), 
device(type='cpu')) +2025-07-04 11:40:34,980 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:34,981 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:34,981 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:34,981 - INFO - After Normalization*************************************** +2025-07-04 11:40:34,981 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:34,981 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:35,297 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:35,297 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:35,297 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:35,297 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:35,297 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:35,297 - INFO - After Normalization*************************************** +2025-07-04 11:40:35,297 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:35,297 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:35,586 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:35,586 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:35,586 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:35,586 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:35,586 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:35,586 - INFO - After Normalization*************************************** +2025-07-04 11:40:35,586 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:35,586 - 
INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:38,792 - INFO - Epoch 148/150 - Train Loss: 0.203702, Val Loss: 0.220369 +2025-07-04 11:40:41,039 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:41,052 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:41,053 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:41,053 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:41,053 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:41,053 - INFO - After Normalization*************************************** +2025-07-04 11:40:41,053 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:41,053 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:41,364 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:41,364 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:41,364 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:41,364 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:41,364 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:41,364 - INFO - After Normalization*************************************** +2025-07-04 11:40:41,364 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:41,365 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:41,654 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:41,654 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:41,654 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:41,654 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) 
+2025-07-04 11:40:41,654 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:41,654 - INFO - After Normalization*************************************** +2025-07-04 11:40:41,654 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:41,654 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:44,860 - INFO - Epoch 149/150 - Train Loss: 0.202560, Val Loss: 0.220369 +2025-07-04 11:40:45,482 - INFO - Epoch 146/150 - Train Loss: 0.093185, Val Loss: 0.086400 +2025-07-04 11:40:47,128 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:47,142 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:47,142 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:47,142 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:47,142 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:47,143 - INFO - After Normalization*************************************** +2025-07-04 11:40:47,143 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:47,143 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:47,447 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:47,447 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:47,447 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:47,447 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:47,447 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:47,448 - INFO - After Normalization*************************************** +2025-07-04 11:40:47,448 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:47,448 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 11:40:47,737 - INFO - before .to(local_rank)*************************************** +2025-07-04 11:40:47,737 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 11:40:47,737 - INFO - After .to(local_rank)*************************************** +2025-07-04 11:40:47,737 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:47,737 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:47,737 - INFO - After Normalization*************************************** +2025-07-04 11:40:47,737 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:47,737 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 11:40:50,986 - INFO - Epoch 150/150 - Train Loss: 0.202702, Val Loss: 0.220655 +2025-07-04 11:40:51,109 - INFO - Final model saved to experiments/Train_Test/final_model_tmp +2025-07-04 11:40:51,119 - INFO - Testing the final model +2025-07-04 11:40:51,119 - INFO - Testing the best model +2025-07-04 11:41:03,182 - INFO - Epoch 147/150 - Train Loss: 0.093196, Val Loss: 0.086339 +2025-07-04 11:41:20,884 - INFO - Epoch 148/150 - Train Loss: 0.092870, Val Loss: 0.086243 +2025-07-04 11:41:38,560 - INFO - Epoch 149/150 - Train Loss: 0.092973, Val Loss: 0.086341 +2025-07-04 11:41:56,249 - INFO - Epoch 150/150 - Train Loss: 0.093295, Val Loss: 0.086364 +2025-07-04 11:41:56,375 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-07-04 11:41:56,375 - INFO - Testing the final model +2025-07-04 11:42:00,622 - INFO - Total MSE across all processes: 4.534732341766357 +2025-07-04 11:42:00,624 - INFO - mean value for all_targets: {tmp} +2025-07-04 11:42:00,628 - INFO - Test MSE: 0.083977, Test MAE: 0.163705, Max AE: 14.103622, Test R2: 0.9250 +2025-07-04 11:42:00,628 - INFO - Relative L2 Error: 0.273197, Relative L1 error: 0.252804 +2025-07-04 11:42:00,628 - INFO - Total inference 
time: 0.03s for 54 samples +2025-07-04 11:42:00,630 - INFO - Testing the best model +2025-07-04 11:42:04,732 - INFO - Total MSE across all processes: 4.533969402313232 +2025-07-04 11:42:04,733 - INFO - mean value for all_targets: {tmp} +2025-07-04 11:42:04,735 - INFO - Test MSE: 0.083962, Test MAE: 0.163290, Max AE: 14.126799, Test R2: 0.9250 +2025-07-04 11:42:04,735 - INFO - Relative L2 Error: 0.273214, Relative L1 error: 0.252141 +2025-07-04 11:42:04,736 - INFO - Total inference time: 0.03s for 54 samples +2025-07-04 12:23:22,879 - INFO - args.exp_name : Train_Test +2025-07-04 12:23:22,887 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 12:23:22,887 - INFO - Starting training with 1 GPUs +2025-07-04 12:23:26,511 - INFO - Total trainable parameters: 1437705 +2025-07-04 12:23:26,553 - INFO - Data loaded: 3 training batches, 1 validation batches, 1 test batches +2025-07-04 12:23:26,556 - INFO - Staring training for 150 epochs +2025-07-04 12:23:30,606 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:23:30,611 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:23:30,611 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:23:30,611 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:30,631 - INFO - After Normalization*************************************** +2025-07-04 12:23:30,632 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:31,532 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:23:31,532 - INFO - 
(device(type='cpu'), device(type='cpu')) +2025-07-04 12:23:31,532 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:23:31,532 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:31,532 - INFO - After Normalization*************************************** +2025-07-04 12:23:31,533 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:31,827 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:23:31,827 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:23:31,828 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:23:31,828 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:31,828 - INFO - After Normalization*************************************** +2025-07-04 12:23:31,828 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:36,031 - INFO - Epoch 1/150 - Train Loss: 1.283437, Val Loss: 1.146866 +2025-07-04 12:23:36,049 - INFO - New best model saved with Val Loss: 1.146866 +2025-07-04 12:23:38,300 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:23:38,314 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:23:38,315 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:23:38,315 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:38,315 - INFO - After Normalization*************************************** +2025-07-04 12:23:38,315 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:38,621 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:23:38,621 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:23:38,621 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:23:38,621 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:38,622 - INFO - After Normalization*************************************** +2025-07-04 12:23:38,622 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:38,911 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:23:38,911 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:23:38,911 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:23:38,911 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:38,911 - INFO - After Normalization*************************************** +2025-07-04 12:23:38,912 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:42,151 - INFO - Epoch 2/150 - Train Loss: 1.159811, Val Loss: 1.148012 +2025-07-04 12:23:44,523 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:23:44,536 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:23:44,536 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:23:44,536 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:44,536 - INFO - After Normalization*************************************** +2025-07-04 12:23:44,537 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:44,841 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:23:44,841 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:23:44,842 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:23:44,842 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:44,842 - INFO - After Normalization*************************************** +2025-07-04 12:23:44,842 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
12:23:45,131 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:23:45,131 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:23:45,131 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:23:45,132 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:45,132 - INFO - After Normalization*************************************** +2025-07-04 12:23:45,132 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:49,923 - INFO - Epoch 3/150 - Train Loss: 1.015355, Val Loss: 1.148288 +2025-07-04 12:23:52,855 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:23:52,861 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:23:52,861 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:23:52,861 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:52,861 - INFO - After Normalization*************************************** +2025-07-04 12:23:52,861 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:53,183 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:23:53,183 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:23:53,183 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:23:53,183 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:53,183 - INFO - After Normalization*************************************** +2025-07-04 12:23:53,183 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:53,472 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:23:53,472 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:23:53,472 - INFO - After .to(local_rank)*************************************** 
+2025-07-04 12:23:53,472 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:53,473 - INFO - After Normalization*************************************** +2025-07-04 12:23:53,473 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:57,118 - INFO - Epoch 4/150 - Train Loss: 0.916734, Val Loss: 1.249089 +2025-07-04 12:23:59,453 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:23:59,468 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:23:59,468 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:23:59,468 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:59,469 - INFO - After Normalization*************************************** +2025-07-04 12:23:59,471 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:59,790 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:23:59,790 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:23:59,790 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:23:59,790 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:23:59,790 - INFO - After Normalization*************************************** +2025-07-04 12:23:59,790 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:00,079 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:00,079 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:00,080 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:00,080 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:00,080 - INFO - After Normalization*************************************** +2025-07-04 12:24:00,080 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 12:24:03,706 - INFO - Epoch 5/150 - Train Loss: 0.839702, Val Loss: 1.442995 +2025-07-04 12:24:08,749 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:08,754 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:08,755 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:08,755 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:08,755 - INFO - After Normalization*************************************** +2025-07-04 12:24:08,755 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:09,070 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:09,070 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:09,070 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:09,070 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:09,070 - INFO - After Normalization*************************************** +2025-07-04 12:24:09,071 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:09,360 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:09,360 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:09,360 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:09,360 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:09,360 - INFO - After Normalization*************************************** +2025-07-04 12:24:09,360 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:17,006 - INFO - Epoch 6/150 - Train Loss: 0.757819, Val Loss: 1.361055 +2025-07-04 12:24:21,841 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:21,847 - INFO - 
(device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:21,848 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:21,848 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:21,848 - INFO - After Normalization*************************************** +2025-07-04 12:24:21,848 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:22,159 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:22,159 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:22,160 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:22,160 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:22,160 - INFO - After Normalization*************************************** +2025-07-04 12:24:22,160 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:22,450 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:22,450 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:22,450 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:22,450 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:22,450 - INFO - After Normalization*************************************** +2025-07-04 12:24:22,451 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:26,617 - INFO - Epoch 7/150 - Train Loss: 0.661384, Val Loss: 1.379884 +2025-07-04 12:24:28,913 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:28,928 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:28,928 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:28,929 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:28,929 - INFO 
- After Normalization*************************************** +2025-07-04 12:24:28,929 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:29,235 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:29,235 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:29,235 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:29,235 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:29,235 - INFO - After Normalization*************************************** +2025-07-04 12:24:29,235 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:29,524 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:29,524 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:29,525 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:29,525 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:29,525 - INFO - After Normalization*************************************** +2025-07-04 12:24:29,525 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:32,911 - INFO - Epoch 8/150 - Train Loss: 0.608690, Val Loss: 1.595568 +2025-07-04 12:24:35,196 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:35,212 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:35,212 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:35,212 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:35,212 - INFO - After Normalization*************************************** +2025-07-04 12:24:35,212 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:35,514 - INFO - before .to(local_rank)*************************************** +2025-07-04 
12:24:35,514 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:35,514 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:35,514 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:35,514 - INFO - After Normalization*************************************** +2025-07-04 12:24:35,514 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:35,809 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:35,809 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:35,809 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:35,809 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:35,809 - INFO - After Normalization*************************************** +2025-07-04 12:24:35,809 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:39,175 - INFO - Epoch 9/150 - Train Loss: 0.560139, Val Loss: 1.838627 +2025-07-04 12:24:41,477 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:41,494 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:41,494 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:41,494 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:41,495 - INFO - After Normalization*************************************** +2025-07-04 12:24:41,495 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:41,817 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:41,817 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:41,817 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:41,817 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) 
+2025-07-04 12:24:41,818 - INFO - After Normalization*************************************** +2025-07-04 12:24:41,818 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:42,106 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:42,106 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:42,107 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:42,107 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:42,107 - INFO - After Normalization*************************************** +2025-07-04 12:24:42,107 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:45,429 - INFO - Epoch 10/150 - Train Loss: 0.526667, Val Loss: 1.521763 +2025-07-04 12:24:47,868 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:47,883 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:47,883 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:47,883 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:47,883 - INFO - After Normalization*************************************** +2025-07-04 12:24:47,883 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:48,183 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:48,183 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:48,184 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:48,184 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:48,184 - INFO - After Normalization*************************************** +2025-07-04 12:24:48,184 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:48,473 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 12:24:48,473 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:48,474 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:48,474 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:48,474 - INFO - After Normalization*************************************** +2025-07-04 12:24:48,474 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:51,759 - INFO - Epoch 11/150 - Train Loss: 0.494009, Val Loss: 1.437564 +2025-07-04 12:24:54,060 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:54,076 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:54,076 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:54,076 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:54,076 - INFO - After Normalization*************************************** +2025-07-04 12:24:54,076 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:54,379 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:54,379 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:54,381 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:54,381 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:54,381 - INFO - After Normalization*************************************** +2025-07-04 12:24:54,381 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:54,670 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:24:54,670 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:24:54,670 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:24:54,671 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:54,671 - INFO - After Normalization*************************************** +2025-07-04 12:24:54,671 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:24:58,014 - INFO - Epoch 12/150 - Train Loss: 0.474216, Val Loss: 1.348367 +2025-07-04 12:25:00,324 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:00,339 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:00,340 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:00,340 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:00,340 - INFO - After Normalization*************************************** +2025-07-04 12:25:00,340 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:00,641 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:00,641 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:00,641 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:00,641 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:00,641 - INFO - After Normalization*************************************** +2025-07-04 12:25:00,641 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:00,936 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:00,937 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:00,937 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:00,937 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:00,937 - INFO - After Normalization*************************************** +2025-07-04 12:25:00,937 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
12:25:04,242 - INFO - Epoch 13/150 - Train Loss: 0.457834, Val Loss: 1.089559 +2025-07-04 12:25:04,271 - INFO - New best model saved with Val Loss: 1.089559 +2025-07-04 12:25:06,578 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:06,592 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:06,592 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:06,592 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:06,592 - INFO - After Normalization*************************************** +2025-07-04 12:25:06,592 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:06,911 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:06,911 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:06,912 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:06,912 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:06,912 - INFO - After Normalization*************************************** +2025-07-04 12:25:06,912 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:07,206 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:07,206 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:07,208 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:07,208 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:07,208 - INFO - After Normalization*************************************** +2025-07-04 12:25:07,208 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:10,508 - INFO - Epoch 14/150 - Train Loss: 0.455360, Val Loss: 0.832426 +2025-07-04 12:25:10,524 - INFO - New best model saved with Val Loss: 0.832426 +2025-07-04 12:25:12,811 - 
INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:12,825 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:12,826 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:12,826 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:12,826 - INFO - After Normalization*************************************** +2025-07-04 12:25:12,826 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:13,150 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:13,150 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:13,150 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:13,150 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:13,150 - INFO - After Normalization*************************************** +2025-07-04 12:25:13,151 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:13,440 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:13,440 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:13,441 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:13,441 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:13,441 - INFO - After Normalization*************************************** +2025-07-04 12:25:13,441 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:16,773 - INFO - Epoch 15/150 - Train Loss: 0.447783, Val Loss: 0.654857 +2025-07-04 12:25:16,787 - INFO - New best model saved with Val Loss: 0.654857 +2025-07-04 12:25:19,070 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:19,085 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:19,085 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 12:25:19,085 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:19,085 - INFO - After Normalization*************************************** +2025-07-04 12:25:19,085 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:19,400 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:19,401 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:19,401 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:19,401 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:19,401 - INFO - After Normalization*************************************** +2025-07-04 12:25:19,401 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:19,690 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:19,691 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:19,691 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:19,691 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:19,691 - INFO - After Normalization*************************************** +2025-07-04 12:25:19,691 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:23,022 - INFO - Epoch 16/150 - Train Loss: 0.446389, Val Loss: 0.548675 +2025-07-04 12:25:23,037 - INFO - New best model saved with Val Loss: 0.548675 +2025-07-04 12:25:25,329 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:25,342 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:25,342 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:25,342 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:25,342 - INFO - 
After Normalization*************************************** +2025-07-04 12:25:25,342 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:25,653 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:25,653 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:25,654 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:25,654 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:25,654 - INFO - After Normalization*************************************** +2025-07-04 12:25:25,654 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:25,943 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:25,943 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:25,944 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:25,944 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:25,944 - INFO - After Normalization*************************************** +2025-07-04 12:25:25,944 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:29,296 - INFO - Epoch 17/150 - Train Loss: 0.440610, Val Loss: 0.495317 +2025-07-04 12:25:29,311 - INFO - New best model saved with Val Loss: 0.495317 +2025-07-04 12:25:31,600 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:31,614 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:31,615 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:31,615 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:31,615 - INFO - After Normalization*************************************** +2025-07-04 12:25:31,615 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:31,922 - 
INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:31,922 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:31,922 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:31,922 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:31,922 - INFO - After Normalization*************************************** +2025-07-04 12:25:31,922 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:32,216 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:32,217 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:32,217 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:32,217 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:32,217 - INFO - After Normalization*************************************** +2025-07-04 12:25:32,217 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:35,527 - INFO - Epoch 18/150 - Train Loss: 0.438833, Val Loss: 0.464275 +2025-07-04 12:25:35,543 - INFO - New best model saved with Val Loss: 0.464275 +2025-07-04 12:25:37,839 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:37,854 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:37,854 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:37,854 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:37,854 - INFO - After Normalization*************************************** +2025-07-04 12:25:37,854 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:38,165 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:38,165 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:38,166 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 12:25:38,166 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:38,166 - INFO - After Normalization*************************************** +2025-07-04 12:25:38,166 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:38,455 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:38,455 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:38,456 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:38,456 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:38,456 - INFO - After Normalization*************************************** +2025-07-04 12:25:38,456 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:41,751 - INFO - Epoch 19/150 - Train Loss: 0.439969, Val Loss: 0.443737 +2025-07-04 12:25:41,765 - INFO - New best model saved with Val Loss: 0.443737 +2025-07-04 12:25:44,065 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:44,078 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:44,079 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:44,079 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:44,079 - INFO - After Normalization*************************************** +2025-07-04 12:25:44,079 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:44,389 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:44,390 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:44,390 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:44,390 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:44,390 - INFO - 
After Normalization*************************************** +2025-07-04 12:25:44,390 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:44,679 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:44,679 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:44,680 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:44,680 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:44,680 - INFO - After Normalization*************************************** +2025-07-04 12:25:44,680 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:47,993 - INFO - Epoch 20/150 - Train Loss: 0.431883, Val Loss: 0.428062 +2025-07-04 12:25:48,008 - INFO - New best model saved with Val Loss: 0.428062 +2025-07-04 12:25:50,430 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:50,445 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:50,445 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:50,445 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:50,445 - INFO - After Normalization*************************************** +2025-07-04 12:25:50,445 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:50,758 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:50,759 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:50,759 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:50,759 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:50,759 - INFO - After Normalization*************************************** +2025-07-04 12:25:50,760 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:51,049 - 
INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:51,049 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:51,049 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:51,050 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:51,050 - INFO - After Normalization*************************************** +2025-07-04 12:25:51,050 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:55,007 - INFO - Epoch 21/150 - Train Loss: 0.431155, Val Loss: 0.423547 +2025-07-04 12:25:55,104 - INFO - New best model saved with Val Loss: 0.423547 +2025-07-04 12:25:57,408 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:57,423 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:57,423 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:57,424 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:57,424 - INFO - After Normalization*************************************** +2025-07-04 12:25:57,424 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:57,736 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:57,736 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:57,736 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:25:57,737 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:57,737 - INFO - After Normalization*************************************** +2025-07-04 12:25:57,737 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:58,026 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:25:58,026 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:25:58,026 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 12:25:58,026 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:25:58,026 - INFO - After Normalization*************************************** +2025-07-04 12:25:58,026 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:01,347 - INFO - Epoch 22/150 - Train Loss: 0.428144, Val Loss: 0.426146 +2025-07-04 12:26:03,674 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:03,690 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:03,690 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:03,690 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:03,690 - INFO - After Normalization*************************************** +2025-07-04 12:26:03,691 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:04,006 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:04,006 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:04,007 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:04,007 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:04,007 - INFO - After Normalization*************************************** +2025-07-04 12:26:04,007 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:04,296 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:04,296 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:04,296 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:04,296 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:04,296 - INFO - After Normalization*************************************** +2025-07-04 
12:26:04,296 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:07,654 - INFO - Epoch 23/150 - Train Loss: 0.425435, Val Loss: 0.427524 +2025-07-04 12:26:09,952 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:09,967 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:09,967 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:09,967 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:09,967 - INFO - After Normalization*************************************** +2025-07-04 12:26:09,967 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:10,277 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:10,277 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:10,277 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:10,277 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:10,278 - INFO - After Normalization*************************************** +2025-07-04 12:26:10,278 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:10,572 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:10,572 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:10,572 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:10,572 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:10,573 - INFO - After Normalization*************************************** +2025-07-04 12:26:10,573 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:13,899 - INFO - Epoch 24/150 - Train Loss: 0.422210, Val Loss: 0.422279 +2025-07-04 12:26:13,916 - INFO - New best model saved with Val Loss: 0.422279 
+2025-07-04 12:26:16,185 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:16,200 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:16,200 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:16,200 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:16,200 - INFO - After Normalization*************************************** +2025-07-04 12:26:16,200 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:16,515 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:16,515 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:16,516 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:16,516 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:16,516 - INFO - After Normalization*************************************** +2025-07-04 12:26:16,516 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:16,805 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:16,805 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:16,806 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:16,806 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:16,806 - INFO - After Normalization*************************************** +2025-07-04 12:26:16,806 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:20,149 - INFO - Epoch 25/150 - Train Loss: 0.423424, Val Loss: 0.427988 +2025-07-04 12:26:22,459 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:22,474 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:22,474 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 12:26:22,474 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:22,474 - INFO - After Normalization*************************************** +2025-07-04 12:26:22,475 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:22,780 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:22,781 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:22,781 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:22,781 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:22,781 - INFO - After Normalization*************************************** +2025-07-04 12:26:22,781 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:23,070 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:23,070 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:23,070 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:23,070 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:23,070 - INFO - After Normalization*************************************** +2025-07-04 12:26:23,070 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:26,430 - INFO - Epoch 26/150 - Train Loss: 0.416589, Val Loss: 0.438708 +2025-07-04 12:26:28,742 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:28,757 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:28,757 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:28,758 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:28,758 - INFO - After Normalization*************************************** +2025-07-04 
12:26:28,758 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:29,058 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:29,058 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:29,058 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:29,058 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:29,058 - INFO - After Normalization*************************************** +2025-07-04 12:26:29,058 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:29,347 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:29,347 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:29,348 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:29,348 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:29,348 - INFO - After Normalization*************************************** +2025-07-04 12:26:29,348 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:32,705 - INFO - Epoch 27/150 - Train Loss: 0.415543, Val Loss: 0.429039 +2025-07-04 12:26:35,019 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:35,035 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:35,035 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:35,035 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:35,035 - INFO - After Normalization*************************************** +2025-07-04 12:26:35,035 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:35,351 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:35,351 - INFO - (device(type='cpu'), device(type='cpu')) 
+2025-07-04 12:26:35,351 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:35,351 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:35,352 - INFO - After Normalization*************************************** +2025-07-04 12:26:35,352 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:35,647 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:35,647 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:35,648 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:35,648 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:35,648 - INFO - After Normalization*************************************** +2025-07-04 12:26:35,648 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:38,991 - INFO - Epoch 28/150 - Train Loss: 0.413719, Val Loss: 0.420002 +2025-07-04 12:26:39,008 - INFO - New best model saved with Val Loss: 0.420002 +2025-07-04 12:26:41,309 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:41,324 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:41,325 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:41,325 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:41,325 - INFO - After Normalization*************************************** +2025-07-04 12:26:41,325 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:41,639 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:41,639 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:41,639 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:41,639 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 12:26:41,639 - INFO - After Normalization*************************************** +2025-07-04 12:26:41,639 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:41,928 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:41,928 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:41,929 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:41,929 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:41,929 - INFO - After Normalization*************************************** +2025-07-04 12:26:41,929 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:45,231 - INFO - Epoch 29/150 - Train Loss: 0.410406, Val Loss: 0.413966 +2025-07-04 12:26:45,246 - INFO - New best model saved with Val Loss: 0.413966 +2025-07-04 12:26:47,566 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:47,582 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:47,582 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:47,582 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:47,582 - INFO - After Normalization*************************************** +2025-07-04 12:26:47,582 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:47,889 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:47,889 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:47,889 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:47,889 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:47,889 - INFO - After Normalization*************************************** +2025-07-04 12:26:47,889 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 12:26:48,179 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:48,179 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:48,179 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:48,179 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:48,179 - INFO - After Normalization*************************************** +2025-07-04 12:26:48,179 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:51,512 - INFO - Epoch 30/150 - Train Loss: 0.409573, Val Loss: 0.411146 +2025-07-04 12:26:51,525 - INFO - New best model saved with Val Loss: 0.411146 +2025-07-04 12:26:54,233 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:54,249 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:54,249 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:54,250 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:54,250 - INFO - After Normalization*************************************** +2025-07-04 12:26:54,250 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:54,552 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:54,553 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:26:54,553 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:54,553 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:54,553 - INFO - After Normalization*************************************** +2025-07-04 12:26:54,553 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:54,848 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:26:54,848 - INFO - (device(type='cpu'), 
device(type='cpu')) +2025-07-04 12:26:54,849 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:26:54,849 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:54,849 - INFO - After Normalization*************************************** +2025-07-04 12:26:54,849 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:26:58,232 - INFO - Epoch 31/150 - Train Loss: 0.407763, Val Loss: 0.405612 +2025-07-04 12:26:58,248 - INFO - New best model saved with Val Loss: 0.405612 +2025-07-04 12:27:00,547 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:00,562 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:00,562 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:00,562 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:00,562 - INFO - After Normalization*************************************** +2025-07-04 12:27:00,562 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:00,883 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:00,883 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:00,884 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:00,884 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:00,884 - INFO - After Normalization*************************************** +2025-07-04 12:27:00,884 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:01,173 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:01,173 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:01,174 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:01,174 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 12:27:01,174 - INFO - After Normalization*************************************** +2025-07-04 12:27:01,174 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:04,528 - INFO - Epoch 32/150 - Train Loss: 0.407623, Val Loss: 0.406176 +2025-07-04 12:27:06,843 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:06,859 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:06,859 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:06,859 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:06,859 - INFO - After Normalization*************************************** +2025-07-04 12:27:06,861 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:07,168 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:07,168 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:07,168 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:07,168 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:07,169 - INFO - After Normalization*************************************** +2025-07-04 12:27:07,169 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:07,457 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:07,458 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:07,458 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:07,458 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:07,458 - INFO - After Normalization*************************************** +2025-07-04 12:27:07,458 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:10,813 - INFO - Epoch 
33/150 - Train Loss: 0.403604, Val Loss: 0.407962 +2025-07-04 12:27:13,102 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:13,117 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:13,117 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:13,117 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:13,117 - INFO - After Normalization*************************************** +2025-07-04 12:27:13,117 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:13,422 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:13,422 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:13,423 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:13,423 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:13,423 - INFO - After Normalization*************************************** +2025-07-04 12:27:13,423 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:13,712 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:13,712 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:13,712 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:13,713 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:13,713 - INFO - After Normalization*************************************** +2025-07-04 12:27:13,713 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:17,051 - INFO - Epoch 34/150 - Train Loss: 0.399802, Val Loss: 0.401497 +2025-07-04 12:27:17,068 - INFO - New best model saved with Val Loss: 0.401497 +2025-07-04 12:27:19,388 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:19,403 - INFO - 
(device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:19,403 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:19,404 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:19,404 - INFO - After Normalization*************************************** +2025-07-04 12:27:19,404 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:19,706 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:19,706 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:19,707 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:19,707 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:19,707 - INFO - After Normalization*************************************** +2025-07-04 12:27:19,708 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:20,003 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:20,003 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:20,004 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:20,004 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:20,004 - INFO - After Normalization*************************************** +2025-07-04 12:27:20,004 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:23,332 - INFO - Epoch 35/150 - Train Loss: 0.400421, Val Loss: 0.400646 +2025-07-04 12:27:23,345 - INFO - New best model saved with Val Loss: 0.400646 +2025-07-04 12:27:25,652 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:25,667 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:25,667 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:25,667 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:25,667 - INFO - After Normalization*************************************** +2025-07-04 12:27:25,667 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:25,989 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:25,989 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:25,990 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:25,990 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:25,990 - INFO - After Normalization*************************************** +2025-07-04 12:27:25,990 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:26,279 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:26,279 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:26,279 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:26,279 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:26,279 - INFO - After Normalization*************************************** +2025-07-04 12:27:26,279 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:29,585 - INFO - Epoch 36/150 - Train Loss: 0.395775, Val Loss: 0.403946 +2025-07-04 12:27:31,864 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:31,879 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:31,879 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:31,879 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:31,880 - INFO - After Normalization*************************************** +2025-07-04 12:27:31,880 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
12:27:32,200 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:32,200 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:32,200 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:32,200 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:32,200 - INFO - After Normalization*************************************** +2025-07-04 12:27:32,200 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:32,490 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:32,490 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:32,490 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:32,490 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:32,490 - INFO - After Normalization*************************************** +2025-07-04 12:27:32,491 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:35,809 - INFO - Epoch 37/150 - Train Loss: 0.395949, Val Loss: 0.401222 +2025-07-04 12:27:38,099 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:38,113 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:38,114 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:38,114 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:38,114 - INFO - After Normalization*************************************** +2025-07-04 12:27:38,114 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:38,433 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:38,433 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:38,434 - INFO - After .to(local_rank)*************************************** 
+2025-07-04 12:27:38,434 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:38,434 - INFO - After Normalization*************************************** +2025-07-04 12:27:38,434 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:38,723 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:38,723 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:38,724 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:38,724 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:38,724 - INFO - After Normalization*************************************** +2025-07-04 12:27:38,724 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:42,100 - INFO - Epoch 38/150 - Train Loss: 0.393724, Val Loss: 0.398120 +2025-07-04 12:27:42,116 - INFO - New best model saved with Val Loss: 0.398120 +2025-07-04 12:27:44,389 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:44,404 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:44,405 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:44,405 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:44,405 - INFO - After Normalization*************************************** +2025-07-04 12:27:44,405 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:44,715 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:44,715 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:44,716 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:44,716 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:44,716 - INFO - After 
Normalization*************************************** +2025-07-04 12:27:44,716 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:45,010 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:45,010 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:45,011 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:45,011 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:45,011 - INFO - After Normalization*************************************** +2025-07-04 12:27:45,011 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:48,371 - INFO - Epoch 39/150 - Train Loss: 0.390039, Val Loss: 0.399025 +2025-07-04 12:27:50,687 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:50,701 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:50,702 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:50,702 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:50,702 - INFO - After Normalization*************************************** +2025-07-04 12:27:50,703 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:51,012 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:51,012 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:51,012 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:51,012 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:51,012 - INFO - After Normalization*************************************** +2025-07-04 12:27:51,012 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:51,301 - INFO - before .to(local_rank)*************************************** +2025-07-04 
12:27:51,301 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:51,301 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:51,301 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:51,301 - INFO - After Normalization*************************************** +2025-07-04 12:27:51,301 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:54,601 - INFO - Epoch 40/150 - Train Loss: 0.391684, Val Loss: 0.390313 +2025-07-04 12:27:54,616 - INFO - New best model saved with Val Loss: 0.390313 +2025-07-04 12:27:57,059 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:57,073 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:57,074 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:57,074 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:57,074 - INFO - After Normalization*************************************** +2025-07-04 12:27:57,074 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:57,383 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:57,384 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:57,384 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:57,384 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:57,384 - INFO - After Normalization*************************************** +2025-07-04 12:27:57,384 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:57,673 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:27:57,673 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:27:57,673 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:27:57,674 
- INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:27:57,674 - INFO - After Normalization*************************************** +2025-07-04 12:27:57,674 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:01,001 - INFO - Epoch 41/150 - Train Loss: 0.386746, Val Loss: 0.386531 +2025-07-04 12:28:01,026 - INFO - New best model saved with Val Loss: 0.386531 +2025-07-04 12:28:03,337 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:03,353 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:03,353 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:03,353 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:03,353 - INFO - After Normalization*************************************** +2025-07-04 12:28:03,353 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:03,659 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:03,659 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:03,659 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:03,659 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:03,661 - INFO - After Normalization*************************************** +2025-07-04 12:28:03,661 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:03,950 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:03,950 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:03,950 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:03,950 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:03,950 - INFO - After Normalization*************************************** +2025-07-04 
12:28:03,950 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:07,299 - INFO - Epoch 42/150 - Train Loss: 0.386503, Val Loss: 0.384749 +2025-07-04 12:28:07,314 - INFO - New best model saved with Val Loss: 0.384749 +2025-07-04 12:28:09,599 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:09,615 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:09,615 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:09,615 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:09,615 - INFO - After Normalization*************************************** +2025-07-04 12:28:09,615 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:09,917 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:09,917 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:09,918 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:09,918 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:09,918 - INFO - After Normalization*************************************** +2025-07-04 12:28:09,918 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:10,213 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:10,213 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:10,213 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:10,213 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:10,213 - INFO - After Normalization*************************************** +2025-07-04 12:28:10,213 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:13,516 - INFO - Epoch 43/150 - Train Loss: 0.385221, Val Loss: 0.382403 
+2025-07-04 12:28:13,531 - INFO - New best model saved with Val Loss: 0.382403 +2025-07-04 12:28:15,847 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:15,863 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:15,863 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:15,863 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:15,863 - INFO - After Normalization*************************************** +2025-07-04 12:28:15,863 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:16,169 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:16,169 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:16,170 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:16,170 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:16,170 - INFO - After Normalization*************************************** +2025-07-04 12:28:16,170 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:16,459 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:16,459 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:16,459 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:16,459 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:16,461 - INFO - After Normalization*************************************** +2025-07-04 12:28:16,461 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:19,800 - INFO - Epoch 44/150 - Train Loss: 0.379563, Val Loss: 0.379401 +2025-07-04 12:28:19,816 - INFO - New best model saved with Val Loss: 0.379401 +2025-07-04 12:28:22,110 - INFO - before .to(local_rank)*************************************** 
+2025-07-04 12:28:22,125 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:22,125 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:22,125 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:22,125 - INFO - After Normalization*************************************** +2025-07-04 12:28:22,125 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:22,451 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:22,451 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:22,451 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:22,451 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:22,451 - INFO - After Normalization*************************************** +2025-07-04 12:28:22,451 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:22,740 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:22,741 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:22,741 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:22,741 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:22,741 - INFO - After Normalization*************************************** +2025-07-04 12:28:22,741 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:26,098 - INFO - Epoch 45/150 - Train Loss: 0.381152, Val Loss: 0.383046 +2025-07-04 12:28:28,406 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:28,421 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:28,421 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:28,421 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 12:28:28,421 - INFO - After Normalization*************************************** +2025-07-04 12:28:28,421 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:28,731 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:28,731 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:28,731 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:28,731 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:28,731 - INFO - After Normalization*************************************** +2025-07-04 12:28:28,731 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:29,020 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:29,021 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:29,021 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:29,021 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:29,021 - INFO - After Normalization*************************************** +2025-07-04 12:28:29,021 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:32,385 - INFO - Epoch 46/150 - Train Loss: 0.377841, Val Loss: 0.374077 +2025-07-04 12:28:32,402 - INFO - New best model saved with Val Loss: 0.374077 +2025-07-04 12:28:34,703 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:34,718 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:34,718 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:34,719 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:34,720 - INFO - After Normalization*************************************** +2025-07-04 12:28:34,720 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 12:28:35,022 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:35,022 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:35,023 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:35,023 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:35,023 - INFO - After Normalization*************************************** +2025-07-04 12:28:35,023 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:35,316 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:35,316 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:35,316 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:35,316 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:35,316 - INFO - After Normalization*************************************** +2025-07-04 12:28:35,316 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:38,695 - INFO - Epoch 47/150 - Train Loss: 0.376989, Val Loss: 0.374267 +2025-07-04 12:28:41,068 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:41,083 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:41,084 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:41,084 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:41,084 - INFO - After Normalization*************************************** +2025-07-04 12:28:41,084 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:41,398 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:41,398 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:41,398 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 12:28:41,398 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:41,398 - INFO - After Normalization*************************************** +2025-07-04 12:28:41,398 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:41,687 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:41,688 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:41,688 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:41,688 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:41,688 - INFO - After Normalization*************************************** +2025-07-04 12:28:41,688 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:45,427 - INFO - Epoch 48/150 - Train Loss: 0.372625, Val Loss: 0.375827 +2025-07-04 12:28:47,751 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:47,766 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:47,767 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:47,767 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:47,767 - INFO - After Normalization*************************************** +2025-07-04 12:28:47,767 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:48,072 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:48,072 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:48,073 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:48,073 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:48,074 - INFO - After Normalization*************************************** +2025-07-04 
12:28:48,074 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:48,369 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:48,369 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:48,369 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:48,369 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:48,369 - INFO - After Normalization*************************************** +2025-07-04 12:28:48,369 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:51,668 - INFO - Epoch 49/150 - Train Loss: 0.373013, Val Loss: 0.377917 +2025-07-04 12:28:53,995 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:54,010 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:54,011 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:54,011 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:54,011 - INFO - After Normalization*************************************** +2025-07-04 12:28:54,011 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:54,315 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:54,315 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:28:54,315 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:54,315 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:54,315 - INFO - After Normalization*************************************** +2025-07-04 12:28:54,315 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:54,611 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:28:54,611 - INFO - (device(type='cpu'), device(type='cpu')) 
+2025-07-04 12:28:54,612 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:28:54,612 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:54,612 - INFO - After Normalization*************************************** +2025-07-04 12:28:54,612 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:28:57,948 - INFO - Epoch 50/150 - Train Loss: 0.370192, Val Loss: 0.374004 +2025-07-04 12:28:57,965 - INFO - New best model saved with Val Loss: 0.374004 +2025-07-04 12:29:00,384 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:00,399 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:00,414 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:00,429 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:00,443 - INFO - After Normalization*************************************** +2025-07-04 12:29:00,457 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:00,777 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:00,777 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:00,778 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:00,778 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:00,778 - INFO - After Normalization*************************************** +2025-07-04 12:29:00,778 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:01,067 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:01,067 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:01,067 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:01,068 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 12:29:01,069 - INFO - After Normalization*************************************** +2025-07-04 12:29:01,069 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:04,415 - INFO - Epoch 51/150 - Train Loss: 0.370575, Val Loss: 0.368872 +2025-07-04 12:29:04,430 - INFO - New best model saved with Val Loss: 0.368872 +2025-07-04 12:29:06,716 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:06,732 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:06,732 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:06,732 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:06,732 - INFO - After Normalization*************************************** +2025-07-04 12:29:06,732 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:07,052 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:07,052 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:07,052 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:07,052 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:07,053 - INFO - After Normalization*************************************** +2025-07-04 12:29:07,053 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:07,342 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:07,342 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:07,342 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:07,342 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:07,342 - INFO - After Normalization*************************************** +2025-07-04 12:29:07,342 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 12:29:10,707 - INFO - Epoch 52/150 - Train Loss: 0.366457, Val Loss: 0.366532 +2025-07-04 12:29:10,723 - INFO - New best model saved with Val Loss: 0.366532 +2025-07-04 12:29:12,995 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:13,010 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:13,011 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:13,011 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:13,011 - INFO - After Normalization*************************************** +2025-07-04 12:29:13,011 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:13,323 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:13,323 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:13,323 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:13,324 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:13,324 - INFO - After Normalization*************************************** +2025-07-04 12:29:13,324 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:13,619 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:13,619 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:13,619 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:13,620 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:13,620 - INFO - After Normalization*************************************** +2025-07-04 12:29:13,620 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:16,962 - INFO - Epoch 53/150 - Train Loss: 0.364227, Val Loss: 0.364998 +2025-07-04 12:29:16,975 - INFO - New best model saved with Val 
Loss: 0.364998 +2025-07-04 12:29:19,286 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:19,296 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:19,297 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:19,297 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:19,298 - INFO - After Normalization*************************************** +2025-07-04 12:29:19,298 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:19,615 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:19,627 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:19,640 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:19,655 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:19,655 - INFO - After Normalization*************************************** +2025-07-04 12:29:19,667 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:19,975 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:19,975 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:19,975 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:19,975 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:19,975 - INFO - After Normalization*************************************** +2025-07-04 12:29:19,975 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:23,616 - INFO - Epoch 54/150 - Train Loss: 0.362108, Val Loss: 0.361583 +2025-07-04 12:29:23,633 - INFO - New best model saved with Val Loss: 0.361583 +2025-07-04 12:29:26,063 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:26,072 - INFO - (device(type='cpu'), 
device(type='cpu')) +2025-07-04 12:29:26,072 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:26,072 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:26,072 - INFO - After Normalization*************************************** +2025-07-04 12:29:26,072 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:26,384 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:26,384 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:26,384 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:26,384 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:26,384 - INFO - After Normalization*************************************** +2025-07-04 12:29:26,384 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:26,673 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:26,673 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:26,673 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:26,673 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:26,673 - INFO - After Normalization*************************************** +2025-07-04 12:29:26,673 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:30,065 - INFO - Epoch 55/150 - Train Loss: 0.362925, Val Loss: 0.360435 +2025-07-04 12:29:30,084 - INFO - New best model saved with Val Loss: 0.360435 +2025-07-04 12:29:34,765 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:34,770 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:34,771 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:34,771 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 12:29:34,771 - INFO - After Normalization*************************************** +2025-07-04 12:29:34,771 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:35,077 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:35,077 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:35,078 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:35,078 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:35,078 - INFO - After Normalization*************************************** +2025-07-04 12:29:35,078 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:35,368 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:35,368 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:35,368 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:35,368 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:35,368 - INFO - After Normalization*************************************** +2025-07-04 12:29:35,368 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:43,429 - INFO - Epoch 56/150 - Train Loss: 0.360084, Val Loss: 0.360202 +2025-07-04 12:29:43,450 - INFO - New best model saved with Val Loss: 0.360202 +2025-07-04 12:29:50,930 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:50,936 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:50,936 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:50,936 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:50,936 - INFO - After Normalization*************************************** +2025-07-04 12:29:50,936 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 12:29:51,254 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:51,254 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:51,255 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:51,255 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:51,255 - INFO - After Normalization*************************************** +2025-07-04 12:29:51,255 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:51,544 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:29:51,544 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:29:51,544 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:29:51,544 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:51,544 - INFO - After Normalization*************************************** +2025-07-04 12:29:51,544 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:29:58,545 - INFO - Epoch 57/150 - Train Loss: 0.356597, Val Loss: 0.362886 +2025-07-04 12:30:01,242 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:01,249 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:01,249 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:01,249 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:01,249 - INFO - After Normalization*************************************** +2025-07-04 12:30:01,249 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:01,557 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:01,557 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:01,558 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 12:30:01,558 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:01,558 - INFO - After Normalization*************************************** +2025-07-04 12:30:01,558 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:01,852 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:01,852 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:01,854 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:01,854 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:01,854 - INFO - After Normalization*************************************** +2025-07-04 12:30:01,854 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:05,206 - INFO - Epoch 58/150 - Train Loss: 0.359285, Val Loss: 0.363573 +2025-07-04 12:30:07,517 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:07,532 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:07,532 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:07,532 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:07,532 - INFO - After Normalization*************************************** +2025-07-04 12:30:07,532 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:07,835 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:07,835 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:07,835 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:07,835 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:07,835 - INFO - After Normalization*************************************** +2025-07-04 
12:30:07,835 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:08,130 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:08,130 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:08,130 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:08,130 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:08,130 - INFO - After Normalization*************************************** +2025-07-04 12:30:08,130 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:11,465 - INFO - Epoch 59/150 - Train Loss: 0.353965, Val Loss: 0.367631 +2025-07-04 12:30:13,759 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:13,773 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:13,774 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:13,774 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:13,774 - INFO - After Normalization*************************************** +2025-07-04 12:30:13,774 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:14,081 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:14,081 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:14,081 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:14,082 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:14,082 - INFO - After Normalization*************************************** +2025-07-04 12:30:14,082 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:14,371 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:14,371 - INFO - (device(type='cpu'), device(type='cpu')) 
+2025-07-04 12:30:14,371 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:14,371 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:14,371 - INFO - After Normalization*************************************** +2025-07-04 12:30:14,371 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:17,679 - INFO - Epoch 60/150 - Train Loss: 0.355830, Val Loss: 0.363127 +2025-07-04 12:30:20,095 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:20,109 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:20,110 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:20,110 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:20,111 - INFO - After Normalization*************************************** +2025-07-04 12:30:20,111 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:20,420 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:20,420 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:20,421 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:20,421 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:20,421 - INFO - After Normalization*************************************** +2025-07-04 12:30:20,421 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:20,710 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:20,710 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:20,710 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:20,710 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:20,710 - INFO - After 
Normalization*************************************** +2025-07-04 12:30:20,710 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:24,065 - INFO - Epoch 61/150 - Train Loss: 0.352915, Val Loss: 0.355417 +2025-07-04 12:30:24,082 - INFO - New best model saved with Val Loss: 0.355417 +2025-07-04 12:30:26,351 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:26,365 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:26,365 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:26,365 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:26,365 - INFO - After Normalization*************************************** +2025-07-04 12:30:26,365 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:26,673 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:26,673 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:26,674 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:26,674 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:26,674 - INFO - After Normalization*************************************** +2025-07-04 12:30:26,674 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:26,963 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:26,963 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:26,964 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:26,964 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:26,964 - INFO - After Normalization*************************************** +2025-07-04 12:30:26,964 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:30,312 - INFO - 
Epoch 62/150 - Train Loss: 0.349458, Val Loss: 0.353605 +2025-07-04 12:30:30,327 - INFO - New best model saved with Val Loss: 0.353605 +2025-07-04 12:30:32,607 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:32,620 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:32,620 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:32,620 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:32,620 - INFO - After Normalization*************************************** +2025-07-04 12:30:32,620 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:32,926 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:32,926 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:32,928 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:32,928 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:32,928 - INFO - After Normalization*************************************** +2025-07-04 12:30:32,928 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:33,228 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:33,228 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:33,229 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:33,229 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:33,229 - INFO - After Normalization*************************************** +2025-07-04 12:30:33,229 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:36,613 - INFO - Epoch 63/150 - Train Loss: 0.349392, Val Loss: 0.354289 +2025-07-04 12:30:38,927 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:38,940 - INFO 
- (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:38,941 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:38,941 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:38,941 - INFO - After Normalization*************************************** +2025-07-04 12:30:38,941 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:39,263 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:39,263 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:39,263 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:39,263 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:39,263 - INFO - After Normalization*************************************** +2025-07-04 12:30:39,263 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:39,553 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:39,553 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:39,553 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:39,553 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:39,553 - INFO - After Normalization*************************************** +2025-07-04 12:30:39,553 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:42,897 - INFO - Epoch 64/150 - Train Loss: 0.349203, Val Loss: 0.349903 +2025-07-04 12:30:42,913 - INFO - New best model saved with Val Loss: 0.349903 +2025-07-04 12:30:45,224 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:45,238 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:45,238 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:45,239 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:45,239 - INFO - After Normalization*************************************** +2025-07-04 12:30:45,239 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:45,548 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:45,548 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:45,548 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:45,548 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:45,549 - INFO - After Normalization*************************************** +2025-07-04 12:30:45,549 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:45,838 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:45,838 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:45,839 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:45,839 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:45,839 - INFO - After Normalization*************************************** +2025-07-04 12:30:45,839 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:49,160 - INFO - Epoch 65/150 - Train Loss: 0.348612, Val Loss: 0.350857 +2025-07-04 12:30:51,451 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:51,465 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:51,466 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:51,466 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:51,466 - INFO - After Normalization*************************************** +2025-07-04 12:30:51,466 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
12:30:51,776 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:51,776 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:51,776 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:51,776 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:51,776 - INFO - After Normalization*************************************** +2025-07-04 12:30:51,776 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:52,065 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:52,065 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:52,066 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:52,066 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:52,066 - INFO - After Normalization*************************************** +2025-07-04 12:30:52,066 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:55,378 - INFO - Epoch 66/150 - Train Loss: 0.344767, Val Loss: 0.353102 +2025-07-04 12:30:57,685 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:57,699 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:57,699 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:57,699 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:57,699 - INFO - After Normalization*************************************** +2025-07-04 12:30:57,699 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:58,008 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:58,009 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:58,009 - INFO - After .to(local_rank)*************************************** 
+2025-07-04 12:30:58,009 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:58,009 - INFO - After Normalization*************************************** +2025-07-04 12:30:58,009 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:58,303 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:30:58,303 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:30:58,303 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:30:58,303 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:30:58,303 - INFO - After Normalization*************************************** +2025-07-04 12:30:58,303 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:01,668 - INFO - Epoch 67/150 - Train Loss: 0.343171, Val Loss: 0.346992 +2025-07-04 12:31:01,684 - INFO - New best model saved with Val Loss: 0.346992 +2025-07-04 12:31:04,008 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:04,022 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:04,023 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:04,023 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:04,023 - INFO - After Normalization*************************************** +2025-07-04 12:31:04,023 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:04,327 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:04,327 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:04,328 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:04,328 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:04,328 - INFO - After 
Normalization*************************************** +2025-07-04 12:31:04,328 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:04,617 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:04,617 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:04,617 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:04,618 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:04,618 - INFO - After Normalization*************************************** +2025-07-04 12:31:04,618 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:07,910 - INFO - Epoch 68/150 - Train Loss: 0.343175, Val Loss: 0.344801 +2025-07-04 12:31:07,924 - INFO - New best model saved with Val Loss: 0.344801 +2025-07-04 12:31:10,172 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:10,186 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:10,187 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:10,187 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:10,187 - INFO - After Normalization*************************************** +2025-07-04 12:31:10,187 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:10,499 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:10,499 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:10,499 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:10,499 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:10,499 - INFO - After Normalization*************************************** +2025-07-04 12:31:10,499 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:10,788 - INFO - 
before .to(local_rank)*************************************** +2025-07-04 12:31:10,788 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:10,789 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:10,789 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:10,789 - INFO - After Normalization*************************************** +2025-07-04 12:31:10,789 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:14,033 - INFO - Epoch 69/150 - Train Loss: 0.342111, Val Loss: 0.345257 +2025-07-04 12:31:16,294 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:16,308 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:16,309 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:16,309 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:16,309 - INFO - After Normalization*************************************** +2025-07-04 12:31:16,309 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:16,628 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:16,629 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:16,630 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:16,630 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:16,630 - INFO - After Normalization*************************************** +2025-07-04 12:31:16,630 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:16,918 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:16,919 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:16,919 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:16,919 - 
INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:16,919 - INFO - After Normalization*************************************** +2025-07-04 12:31:16,919 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:20,162 - INFO - Epoch 70/150 - Train Loss: 0.338463, Val Loss: 0.343932 +2025-07-04 12:31:20,177 - INFO - New best model saved with Val Loss: 0.343932 +2025-07-04 12:31:22,584 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:22,598 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:22,599 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:22,599 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:22,599 - INFO - After Normalization*************************************** +2025-07-04 12:31:22,599 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:22,910 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:22,910 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:22,910 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:22,910 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:22,910 - INFO - After Normalization*************************************** +2025-07-04 12:31:22,910 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:23,199 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:23,199 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:23,199 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:23,199 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:23,199 - INFO - After Normalization*************************************** +2025-07-04 
12:31:23,199 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:26,450 - INFO - Epoch 71/150 - Train Loss: 0.337893, Val Loss: 0.343028 +2025-07-04 12:31:26,464 - INFO - New best model saved with Val Loss: 0.343028 +2025-07-04 12:31:28,728 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:28,742 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:28,742 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:28,742 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:28,742 - INFO - After Normalization*************************************** +2025-07-04 12:31:28,742 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:29,042 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:29,042 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:29,043 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:29,043 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:29,043 - INFO - After Normalization*************************************** +2025-07-04 12:31:29,043 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:29,332 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:29,332 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:29,335 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:29,335 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:29,335 - INFO - After Normalization*************************************** +2025-07-04 12:31:29,335 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:32,568 - INFO - Epoch 72/150 - Train Loss: 0.335130, Val Loss: 0.342844 
+2025-07-04 12:31:32,583 - INFO - New best model saved with Val Loss: 0.342844 +2025-07-04 12:31:34,860 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:34,874 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:34,875 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:34,875 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:34,875 - INFO - After Normalization*************************************** +2025-07-04 12:31:34,875 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:35,183 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:35,184 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:35,184 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:35,184 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:35,184 - INFO - After Normalization*************************************** +2025-07-04 12:31:35,184 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:35,502 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:35,502 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:35,502 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:35,502 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:35,502 - INFO - After Normalization*************************************** +2025-07-04 12:31:35,502 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:38,735 - INFO - Epoch 73/150 - Train Loss: 0.336449, Val Loss: 0.340494 +2025-07-04 12:31:38,751 - INFO - New best model saved with Val Loss: 0.340494 +2025-07-04 12:31:41,030 - INFO - before .to(local_rank)*************************************** 
+2025-07-04 12:31:41,043 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:41,044 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:41,044 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:41,044 - INFO - After Normalization*************************************** +2025-07-04 12:31:41,044 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:41,361 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:41,361 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:41,361 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:41,361 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:41,361 - INFO - After Normalization*************************************** +2025-07-04 12:31:41,361 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:41,655 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:41,655 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:41,655 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:41,655 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:41,655 - INFO - After Normalization*************************************** +2025-07-04 12:31:41,655 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:44,903 - INFO - Epoch 74/150 - Train Loss: 0.332509, Val Loss: 0.336325 +2025-07-04 12:31:44,918 - INFO - New best model saved with Val Loss: 0.336325 +2025-07-04 12:31:47,173 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:47,187 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:47,187 - INFO - After .to(local_rank)*************************************** +2025-07-04 
12:31:47,187 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:47,187 - INFO - After Normalization*************************************** +2025-07-04 12:31:47,188 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:47,495 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:47,495 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:47,495 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:47,495 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:47,496 - INFO - After Normalization*************************************** +2025-07-04 12:31:47,496 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:47,784 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:47,784 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:47,784 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:47,784 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:47,785 - INFO - After Normalization*************************************** +2025-07-04 12:31:47,785 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:51,049 - INFO - Epoch 75/150 - Train Loss: 0.332463, Val Loss: 0.338520 +2025-07-04 12:31:53,310 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:53,324 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:53,325 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:53,325 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:53,325 - INFO - After Normalization*************************************** +2025-07-04 12:31:53,325 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 12:31:53,642 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:53,642 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:53,643 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:53,643 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:53,643 - INFO - After Normalization*************************************** +2025-07-04 12:31:53,643 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:53,931 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:53,931 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:53,932 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:53,932 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:53,932 - INFO - After Normalization*************************************** +2025-07-04 12:31:53,932 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:57,161 - INFO - Epoch 76/150 - Train Loss: 0.330725, Val Loss: 0.343105 +2025-07-04 12:31:59,449 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:59,463 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:59,463 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:31:59,463 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:59,463 - INFO - After Normalization*************************************** +2025-07-04 12:31:59,464 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:59,771 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:31:59,771 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:31:59,771 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 12:31:59,771 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:31:59,771 - INFO - After Normalization*************************************** +2025-07-04 12:31:59,771 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:00,060 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:00,060 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:00,060 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:00,060 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:00,060 - INFO - After Normalization*************************************** +2025-07-04 12:32:00,060 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:03,295 - INFO - Epoch 77/150 - Train Loss: 0.329678, Val Loss: 0.336431 +2025-07-04 12:32:05,551 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:05,566 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:05,566 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:05,566 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:05,566 - INFO - After Normalization*************************************** +2025-07-04 12:32:05,566 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:05,882 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:05,882 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:05,883 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:05,883 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:05,883 - INFO - After Normalization*************************************** +2025-07-04 
12:32:05,883 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:06,171 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:06,171 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:06,172 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:06,172 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:06,172 - INFO - After Normalization*************************************** +2025-07-04 12:32:06,172 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:09,419 - INFO - Epoch 78/150 - Train Loss: 0.332115, Val Loss: 0.340799 +2025-07-04 12:32:11,689 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:11,703 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:11,703 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:11,703 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:11,703 - INFO - After Normalization*************************************** +2025-07-04 12:32:11,703 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:12,006 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:12,006 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:12,007 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:12,007 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:12,007 - INFO - After Normalization*************************************** +2025-07-04 12:32:12,007 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:12,295 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:12,295 - INFO - (device(type='cpu'), device(type='cpu')) 
+2025-07-04 12:32:12,296 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:12,296 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:12,296 - INFO - After Normalization*************************************** +2025-07-04 12:32:12,296 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:15,543 - INFO - Epoch 79/150 - Train Loss: 0.323830, Val Loss: 0.352417 +2025-07-04 12:32:17,793 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:17,806 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:17,807 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:17,807 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:17,807 - INFO - After Normalization*************************************** +2025-07-04 12:32:17,807 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:18,125 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:18,125 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:18,125 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:18,125 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:18,125 - INFO - After Normalization*************************************** +2025-07-04 12:32:18,125 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:18,414 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:18,414 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:18,414 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:18,414 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:18,414 - INFO - After 
Normalization*************************************** +2025-07-04 12:32:18,414 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:21,681 - INFO - Epoch 80/150 - Train Loss: 0.324538, Val Loss: 0.337843 +2025-07-04 12:32:24,082 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:24,095 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:24,095 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:24,096 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:24,096 - INFO - After Normalization*************************************** +2025-07-04 12:32:24,096 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:24,405 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:24,405 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:24,406 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:24,406 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:24,406 - INFO - After Normalization*************************************** +2025-07-04 12:32:24,406 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:24,698 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:24,699 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:24,699 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:24,699 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:24,699 - INFO - After Normalization*************************************** +2025-07-04 12:32:24,699 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:27,909 - INFO - Epoch 81/150 - Train Loss: 0.323342, Val Loss: 0.337610 +2025-07-04 12:32:30,159 
- INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:30,173 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:30,173 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:30,173 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:30,173 - INFO - After Normalization*************************************** +2025-07-04 12:32:30,173 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:30,479 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:30,479 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:30,479 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:30,479 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:30,479 - INFO - After Normalization*************************************** +2025-07-04 12:32:30,480 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:30,772 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:30,772 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:30,772 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:30,773 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:30,773 - INFO - After Normalization*************************************** +2025-07-04 12:32:30,773 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:34,005 - INFO - Epoch 82/150 - Train Loss: 0.323520, Val Loss: 0.327724 +2025-07-04 12:32:34,021 - INFO - New best model saved with Val Loss: 0.327724 +2025-07-04 12:32:36,284 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:36,297 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:36,297 - INFO - 
After .to(local_rank)*************************************** +2025-07-04 12:32:36,297 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:36,297 - INFO - After Normalization*************************************** +2025-07-04 12:32:36,298 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:36,615 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:36,615 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:36,616 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:36,616 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:36,616 - INFO - After Normalization*************************************** +2025-07-04 12:32:36,616 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:36,908 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:36,908 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:36,908 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:36,908 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:36,908 - INFO - After Normalization*************************************** +2025-07-04 12:32:36,908 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:40,151 - INFO - Epoch 83/150 - Train Loss: 0.322984, Val Loss: 0.328694 +2025-07-04 12:32:42,422 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:42,436 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:42,436 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:42,436 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:42,436 - INFO - After Normalization*************************************** +2025-07-04 
12:32:42,436 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:42,745 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:42,745 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:42,745 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:42,745 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:42,745 - INFO - After Normalization*************************************** +2025-07-04 12:32:42,746 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:43,034 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:43,034 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:43,034 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:43,035 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:43,035 - INFO - After Normalization*************************************** +2025-07-04 12:32:43,035 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:46,276 - INFO - Epoch 84/150 - Train Loss: 0.320235, Val Loss: 0.327149 +2025-07-04 12:32:46,292 - INFO - New best model saved with Val Loss: 0.327149 +2025-07-04 12:32:48,551 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:48,565 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:48,565 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:48,565 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:48,566 - INFO - After Normalization*************************************** +2025-07-04 12:32:48,566 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:48,886 - INFO - before .to(local_rank)*************************************** 
+2025-07-04 12:32:48,886 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:48,886 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:48,886 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:48,887 - INFO - After Normalization*************************************** +2025-07-04 12:32:48,887 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:49,175 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:49,175 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:49,175 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:49,175 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:49,176 - INFO - After Normalization*************************************** +2025-07-04 12:32:49,176 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:52,428 - INFO - Epoch 85/150 - Train Loss: 0.318422, Val Loss: 0.322234 +2025-07-04 12:32:52,442 - INFO - New best model saved with Val Loss: 0.322234 +2025-07-04 12:32:54,710 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:54,723 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:54,724 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:54,724 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:54,724 - INFO - After Normalization*************************************** +2025-07-04 12:32:54,724 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:55,030 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:55,030 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:55,030 - INFO - After .to(local_rank)*************************************** +2025-07-04 
12:32:55,030 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:55,030 - INFO - After Normalization*************************************** +2025-07-04 12:32:55,030 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:55,319 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:32:55,319 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:32:55,319 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:32:55,319 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:55,319 - INFO - After Normalization*************************************** +2025-07-04 12:32:55,319 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:32:58,718 - INFO - Epoch 86/150 - Train Loss: 0.316809, Val Loss: 0.323506 +2025-07-04 12:33:00,992 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:01,006 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:01,007 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:01,007 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:01,007 - INFO - After Normalization*************************************** +2025-07-04 12:33:01,007 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:01,325 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:01,325 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:01,325 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:01,325 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:01,325 - INFO - After Normalization*************************************** +2025-07-04 12:33:01,325 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 12:33:01,614 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:01,614 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:01,614 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:01,614 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:01,614 - INFO - After Normalization*************************************** +2025-07-04 12:33:01,614 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:04,853 - INFO - Epoch 87/150 - Train Loss: 0.316298, Val Loss: 0.332799 +2025-07-04 12:33:07,132 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:07,147 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:07,147 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:07,147 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:07,147 - INFO - After Normalization*************************************** +2025-07-04 12:33:07,147 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:07,452 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:07,452 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:07,453 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:07,453 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:07,453 - INFO - After Normalization*************************************** +2025-07-04 12:33:07,453 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:07,742 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:07,742 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:07,742 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 12:33:07,742 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:07,742 - INFO - After Normalization*************************************** +2025-07-04 12:33:07,742 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:10,996 - INFO - Epoch 88/150 - Train Loss: 0.317903, Val Loss: 0.324952 +2025-07-04 12:33:13,265 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:13,279 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:13,280 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:13,280 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:13,280 - INFO - After Normalization*************************************** +2025-07-04 12:33:13,280 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:13,596 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:13,596 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:13,596 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:13,596 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:13,597 - INFO - After Normalization*************************************** +2025-07-04 12:33:13,597 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:13,885 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:13,886 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:13,886 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:13,886 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:13,886 - INFO - After Normalization*************************************** +2025-07-04 
12:33:13,886 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:17,145 - INFO - Epoch 89/150 - Train Loss: 0.317754, Val Loss: 0.318536 +2025-07-04 12:33:17,160 - INFO - New best model saved with Val Loss: 0.318536 +2025-07-04 12:33:19,432 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:19,446 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:19,446 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:19,446 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:19,446 - INFO - After Normalization*************************************** +2025-07-04 12:33:19,446 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:19,748 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:19,748 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:19,749 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:19,749 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:19,749 - INFO - After Normalization*************************************** +2025-07-04 12:33:19,749 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:20,042 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:20,042 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:20,042 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:20,042 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:20,042 - INFO - After Normalization*************************************** +2025-07-04 12:33:20,042 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:23,287 - INFO - Epoch 90/150 - Train Loss: 0.317134, Val Loss: 0.318901 
+2025-07-04 12:33:25,652 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:25,665 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:25,665 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:25,665 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:25,665 - INFO - After Normalization*************************************** +2025-07-04 12:33:25,665 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:25,968 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:25,968 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:25,968 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:25,968 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:25,968 - INFO - After Normalization*************************************** +2025-07-04 12:33:25,968 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:26,284 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:26,284 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:26,284 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:26,284 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:26,284 - INFO - After Normalization*************************************** +2025-07-04 12:33:26,284 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:29,525 - INFO - Epoch 91/150 - Train Loss: 0.313987, Val Loss: 0.322064 +2025-07-04 12:33:31,812 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:31,826 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:31,826 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 12:33:31,827 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:31,827 - INFO - After Normalization*************************************** +2025-07-04 12:33:31,827 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:32,140 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:32,140 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:32,140 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:32,140 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:32,140 - INFO - After Normalization*************************************** +2025-07-04 12:33:32,140 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:32,429 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:32,429 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:32,430 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:32,430 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:32,430 - INFO - After Normalization*************************************** +2025-07-04 12:33:32,430 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:35,664 - INFO - Epoch 92/150 - Train Loss: 0.313308, Val Loss: 0.327740 +2025-07-04 12:33:37,941 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:37,954 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:37,954 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:37,955 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:37,955 - INFO - After Normalization*************************************** +2025-07-04 
12:33:37,955 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:38,260 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:38,260 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:38,261 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:38,261 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:38,261 - INFO - After Normalization*************************************** +2025-07-04 12:33:38,261 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:38,549 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:38,549 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:38,550 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:38,550 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:38,550 - INFO - After Normalization*************************************** +2025-07-04 12:33:38,550 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:41,797 - INFO - Epoch 93/150 - Train Loss: 0.312805, Val Loss: 0.322787 +2025-07-04 12:33:44,073 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:44,088 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:44,088 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:44,088 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:44,088 - INFO - After Normalization*************************************** +2025-07-04 12:33:44,088 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:44,396 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:44,397 - INFO - (device(type='cpu'), device(type='cpu')) 
+2025-07-04 12:33:44,397 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:44,397 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:44,397 - INFO - After Normalization*************************************** +2025-07-04 12:33:44,397 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:44,686 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:44,686 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:44,686 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:44,686 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:44,686 - INFO - After Normalization*************************************** +2025-07-04 12:33:44,686 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:47,962 - INFO - Epoch 94/150 - Train Loss: 0.310842, Val Loss: 0.324050 +2025-07-04 12:33:50,220 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:50,233 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:50,233 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:50,233 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:50,233 - INFO - After Normalization*************************************** +2025-07-04 12:33:50,233 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:50,553 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:50,553 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:50,553 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:50,553 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:50,553 - INFO - After 
Normalization*************************************** +2025-07-04 12:33:50,553 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:50,842 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:50,842 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:50,842 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:50,842 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:50,842 - INFO - After Normalization*************************************** +2025-07-04 12:33:50,843 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:54,078 - INFO - Epoch 95/150 - Train Loss: 0.311745, Val Loss: 0.313009 +2025-07-04 12:33:54,094 - INFO - New best model saved with Val Loss: 0.313009 +2025-07-04 12:33:56,345 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:56,359 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:56,360 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:56,360 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:56,360 - INFO - After Normalization*************************************** +2025-07-04 12:33:56,360 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:56,667 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:33:56,667 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:56,668 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:56,668 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:56,668 - INFO - After Normalization*************************************** +2025-07-04 12:33:56,668 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:56,956 - INFO - 
before .to(local_rank)*************************************** +2025-07-04 12:33:56,957 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:33:56,957 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:33:56,957 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:33:56,957 - INFO - After Normalization*************************************** +2025-07-04 12:33:56,957 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:00,212 - INFO - Epoch 96/150 - Train Loss: 0.308067, Val Loss: 0.321982 +2025-07-04 12:34:02,487 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:02,502 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:02,502 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:02,502 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:02,502 - INFO - After Normalization*************************************** +2025-07-04 12:34:02,503 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:02,820 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:02,821 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:02,821 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:02,821 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:02,821 - INFO - After Normalization*************************************** +2025-07-04 12:34:02,821 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:03,110 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:03,110 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:03,110 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:03,110 - 
INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:03,110 - INFO - After Normalization*************************************** +2025-07-04 12:34:03,110 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:06,332 - INFO - Epoch 97/150 - Train Loss: 0.306806, Val Loss: 0.330466 +2025-07-04 12:34:08,602 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:08,616 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:08,616 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:08,616 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:08,617 - INFO - After Normalization*************************************** +2025-07-04 12:34:08,617 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:08,924 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:08,924 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:08,924 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:08,924 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:08,925 - INFO - After Normalization*************************************** +2025-07-04 12:34:08,925 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:09,213 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:09,213 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:09,213 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:09,213 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:09,214 - INFO - After Normalization*************************************** +2025-07-04 12:34:09,214 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) 
+2025-07-04 12:34:12,446 - INFO - Epoch 98/150 - Train Loss: 0.304537, Val Loss: 0.335054 +2025-07-04 12:34:14,704 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:14,718 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:14,719 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:14,719 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:14,719 - INFO - After Normalization*************************************** +2025-07-04 12:34:14,719 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:15,019 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:15,019 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:15,020 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:15,020 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:15,020 - INFO - After Normalization*************************************** +2025-07-04 12:34:15,020 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:15,312 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:15,312 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:15,312 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:15,313 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:15,313 - INFO - After Normalization*************************************** +2025-07-04 12:34:15,313 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:18,537 - INFO - Epoch 99/150 - Train Loss: 0.303622, Val Loss: 0.331282 +2025-07-04 12:34:20,811 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:20,824 - INFO - (device(type='cpu'), device(type='cpu')) 
+2025-07-04 12:34:20,825 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:20,825 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:20,825 - INFO - After Normalization*************************************** +2025-07-04 12:34:20,825 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:21,138 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:21,138 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:21,138 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:21,138 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:21,138 - INFO - After Normalization*************************************** +2025-07-04 12:34:21,138 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:21,431 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:21,432 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:21,432 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:21,432 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:21,432 - INFO - After Normalization*************************************** +2025-07-04 12:34:21,432 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:24,668 - INFO - Epoch 100/150 - Train Loss: 0.303389, Val Loss: 0.322971 +2025-07-04 12:34:27,041 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:27,054 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:27,054 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:27,054 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:27,054 - INFO - After 
Normalization*************************************** +2025-07-04 12:34:27,054 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:27,371 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:27,371 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:27,371 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:27,371 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:27,371 - INFO - After Normalization*************************************** +2025-07-04 12:34:27,371 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:27,660 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:27,660 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:27,660 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:27,660 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:27,660 - INFO - After Normalization*************************************** +2025-07-04 12:34:27,660 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:30,908 - INFO - Epoch 101/150 - Train Loss: 0.304374, Val Loss: 0.331376 +2025-07-04 12:34:33,181 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:33,195 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:33,196 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:33,196 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:33,196 - INFO - After Normalization*************************************** +2025-07-04 12:34:33,196 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:33,502 - INFO - before .to(local_rank)*************************************** +2025-07-04 
12:34:33,502 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:33,502 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:33,502 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:33,502 - INFO - After Normalization*************************************** +2025-07-04 12:34:33,502 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:33,791 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:33,791 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:33,791 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:33,791 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:33,791 - INFO - After Normalization*************************************** +2025-07-04 12:34:33,791 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:37,021 - INFO - Epoch 102/150 - Train Loss: 0.301968, Val Loss: 0.325183 +2025-07-04 12:34:39,304 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:39,318 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:39,319 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:39,319 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:39,319 - INFO - After Normalization*************************************** +2025-07-04 12:34:39,319 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:39,635 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:39,635 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:39,635 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:39,635 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) 
+2025-07-04 12:34:39,636 - INFO - After Normalization*************************************** +2025-07-04 12:34:39,636 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:39,924 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:39,924 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:39,924 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:39,925 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:39,925 - INFO - After Normalization*************************************** +2025-07-04 12:34:39,925 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:43,160 - INFO - Epoch 103/150 - Train Loss: 0.298230, Val Loss: 0.322976 +2025-07-04 12:34:45,440 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:45,454 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:45,467 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:45,480 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:45,492 - INFO - After Normalization*************************************** +2025-07-04 12:34:45,502 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:45,833 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:45,834 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:45,834 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:45,834 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:45,834 - INFO - After Normalization*************************************** +2025-07-04 12:34:45,834 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:46,123 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 12:34:46,123 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:46,123 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:46,123 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:46,123 - INFO - After Normalization*************************************** +2025-07-04 12:34:46,123 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:49,341 - INFO - Epoch 104/150 - Train Loss: 0.301941, Val Loss: 0.339548 +2025-07-04 12:34:51,607 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:51,620 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:51,621 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:51,621 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:51,621 - INFO - After Normalization*************************************** +2025-07-04 12:34:51,621 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:51,929 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:51,929 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:51,929 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:51,929 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:51,929 - INFO - After Normalization*************************************** +2025-07-04 12:34:51,929 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:52,218 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:52,218 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:52,218 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:52,218 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:52,218 - INFO - After Normalization*************************************** +2025-07-04 12:34:52,218 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:55,462 - INFO - Epoch 105/150 - Train Loss: 0.301876, Val Loss: 0.337391 +2025-07-04 12:34:57,720 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:57,734 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:57,734 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:57,734 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:57,735 - INFO - After Normalization*************************************** +2025-07-04 12:34:57,735 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:58,036 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:58,036 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:58,036 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:58,036 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:58,037 - INFO - After Normalization*************************************** +2025-07-04 12:34:58,037 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:58,325 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:34:58,325 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:34:58,325 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:34:58,325 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:34:58,325 - INFO - After Normalization*************************************** +2025-07-04 12:34:58,325 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
12:35:01,559 - INFO - Epoch 106/150 - Train Loss: 0.301338, Val Loss: 0.324497 +2025-07-04 12:35:03,824 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:03,838 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:03,839 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:03,839 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:03,839 - INFO - After Normalization*************************************** +2025-07-04 12:35:03,839 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:04,151 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:04,151 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:04,152 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:04,152 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:04,152 - INFO - After Normalization*************************************** +2025-07-04 12:35:04,152 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:04,441 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:04,441 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:04,441 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:04,442 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:04,442 - INFO - After Normalization*************************************** +2025-07-04 12:35:04,442 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:07,682 - INFO - Epoch 107/150 - Train Loss: 0.297671, Val Loss: 0.313158 +2025-07-04 12:35:09,961 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:09,975 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 
12:35:09,975 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:09,975 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:09,975 - INFO - After Normalization*************************************** +2025-07-04 12:35:09,975 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:10,277 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:10,277 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:10,278 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:10,278 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:10,278 - INFO - After Normalization*************************************** +2025-07-04 12:35:10,278 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:10,566 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:10,566 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:10,567 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:10,567 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:10,567 - INFO - After Normalization*************************************** +2025-07-04 12:35:10,567 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:13,812 - INFO - Epoch 108/150 - Train Loss: 0.294682, Val Loss: 0.305555 +2025-07-04 12:35:13,827 - INFO - New best model saved with Val Loss: 0.305555 +2025-07-04 12:35:16,100 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:16,114 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:16,115 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:16,115 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) 
+2025-07-04 12:35:16,115 - INFO - After Normalization*************************************** +2025-07-04 12:35:16,115 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:16,424 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:16,425 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:16,425 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:16,425 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:16,425 - INFO - After Normalization*************************************** +2025-07-04 12:35:16,425 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:16,718 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:16,718 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:16,718 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:16,718 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:16,718 - INFO - After Normalization*************************************** +2025-07-04 12:35:16,718 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:19,948 - INFO - Epoch 109/150 - Train Loss: 0.297884, Val Loss: 0.303263 +2025-07-04 12:35:19,962 - INFO - New best model saved with Val Loss: 0.303263 +2025-07-04 12:35:22,214 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:22,229 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:22,229 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:22,229 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:22,229 - INFO - After Normalization*************************************** +2025-07-04 12:35:22,229 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 12:35:22,531 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:22,531 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:22,532 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:22,532 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:22,532 - INFO - After Normalization*************************************** +2025-07-04 12:35:22,532 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:22,825 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:22,825 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:22,826 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:22,826 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:22,826 - INFO - After Normalization*************************************** +2025-07-04 12:35:22,826 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:26,042 - INFO - Epoch 110/150 - Train Loss: 0.297559, Val Loss: 0.302826 +2025-07-04 12:35:26,058 - INFO - New best model saved with Val Loss: 0.302826 +2025-07-04 12:35:28,434 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:28,449 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:28,449 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:28,449 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:28,449 - INFO - After Normalization*************************************** +2025-07-04 12:35:28,449 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:28,751 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:28,751 - INFO - (device(type='cpu'), device(type='cpu')) 
+2025-07-04 12:35:28,752 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:28,752 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:28,752 - INFO - After Normalization*************************************** +2025-07-04 12:35:28,752 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:29,044 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:29,044 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:29,045 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:29,045 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:29,045 - INFO - After Normalization*************************************** +2025-07-04 12:35:29,045 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:32,288 - INFO - Epoch 111/150 - Train Loss: 0.294144, Val Loss: 0.302702 +2025-07-04 12:35:32,304 - INFO - New best model saved with Val Loss: 0.302702 +2025-07-04 12:35:34,552 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:34,567 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:34,567 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:34,567 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:34,567 - INFO - After Normalization*************************************** +2025-07-04 12:35:34,567 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:34,886 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:34,886 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:34,887 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:34,887 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 12:35:34,887 - INFO - After Normalization*************************************** +2025-07-04 12:35:34,887 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:35,176 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:35,176 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:35,176 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:35,176 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:35,176 - INFO - After Normalization*************************************** +2025-07-04 12:35:35,176 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:38,420 - INFO - Epoch 112/150 - Train Loss: 0.294188, Val Loss: 0.303885 +2025-07-04 12:35:40,682 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:40,696 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:40,697 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:40,697 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:40,697 - INFO - After Normalization*************************************** +2025-07-04 12:35:40,697 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:41,003 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:41,004 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:41,004 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:41,004 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:41,004 - INFO - After Normalization*************************************** +2025-07-04 12:35:41,004 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:41,293 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 12:35:41,293 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:41,294 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:41,294 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:41,294 - INFO - After Normalization*************************************** +2025-07-04 12:35:41,294 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:44,528 - INFO - Epoch 113/150 - Train Loss: 0.294812, Val Loss: 0.302568 +2025-07-04 12:35:44,544 - INFO - New best model saved with Val Loss: 0.302568 +2025-07-04 12:35:46,818 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:46,833 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:46,833 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:46,833 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:46,833 - INFO - After Normalization*************************************** +2025-07-04 12:35:46,833 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:47,151 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:47,151 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:47,152 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:47,152 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:47,152 - INFO - After Normalization*************************************** +2025-07-04 12:35:47,152 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:47,440 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:47,440 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:47,440 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 12:35:47,441 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:47,441 - INFO - After Normalization*************************************** +2025-07-04 12:35:47,441 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:50,681 - INFO - Epoch 114/150 - Train Loss: 0.291335, Val Loss: 0.302743 +2025-07-04 12:35:52,954 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:52,967 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:52,968 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:52,968 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:52,968 - INFO - After Normalization*************************************** +2025-07-04 12:35:52,968 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:53,270 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:53,270 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:53,270 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:53,270 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:53,270 - INFO - After Normalization*************************************** +2025-07-04 12:35:53,270 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:53,559 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:53,559 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:53,559 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:53,559 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:53,560 - INFO - After Normalization*************************************** +2025-07-04 
12:35:53,560 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:56,791 - INFO - Epoch 115/150 - Train Loss: 0.294847, Val Loss: 0.301672 +2025-07-04 12:35:56,807 - INFO - New best model saved with Val Loss: 0.301672 +2025-07-04 12:35:59,073 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:59,086 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:59,087 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:59,087 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:59,087 - INFO - After Normalization*************************************** +2025-07-04 12:35:59,087 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:59,400 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:59,400 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:59,400 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:59,401 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:59,401 - INFO - After Normalization*************************************** +2025-07-04 12:35:59,401 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:59,689 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:35:59,689 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:35:59,689 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:35:59,689 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:35:59,689 - INFO - After Normalization*************************************** +2025-07-04 12:35:59,690 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:02,878 - INFO - Epoch 116/150 - Train Loss: 0.291347, Val Loss: 0.301323 
+2025-07-04 12:36:02,892 - INFO - New best model saved with Val Loss: 0.301323 +2025-07-04 12:36:05,154 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:05,177 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:05,177 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:05,177 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:05,177 - INFO - After Normalization*************************************** +2025-07-04 12:36:05,178 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:05,484 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:05,484 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:05,485 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:05,485 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:05,485 - INFO - After Normalization*************************************** +2025-07-04 12:36:05,485 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:05,773 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:05,773 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:05,774 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:05,774 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:05,774 - INFO - After Normalization*************************************** +2025-07-04 12:36:05,774 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:08,966 - INFO - Epoch 117/150 - Train Loss: 0.297098, Val Loss: 0.301480 +2025-07-04 12:36:11,213 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:11,227 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 
12:36:11,227 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:11,227 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:11,227 - INFO - After Normalization*************************************** +2025-07-04 12:36:11,227 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:11,532 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:11,532 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:11,532 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:11,532 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:11,532 - INFO - After Normalization*************************************** +2025-07-04 12:36:11,532 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:11,821 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:11,821 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:11,821 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:11,821 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:11,821 - INFO - After Normalization*************************************** +2025-07-04 12:36:11,821 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:14,985 - INFO - Epoch 118/150 - Train Loss: 0.293385, Val Loss: 0.300978 +2025-07-04 12:36:14,999 - INFO - New best model saved with Val Loss: 0.300978 +2025-07-04 12:36:17,246 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:17,259 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:17,259 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:17,259 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) 
+2025-07-04 12:36:17,259 - INFO - After Normalization*************************************** +2025-07-04 12:36:17,259 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:17,564 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:17,564 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:17,564 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:17,564 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:17,564 - INFO - After Normalization*************************************** +2025-07-04 12:36:17,564 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:17,853 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:17,853 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:17,853 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:17,853 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:17,853 - INFO - After Normalization*************************************** +2025-07-04 12:36:17,853 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:21,047 - INFO - Epoch 119/150 - Train Loss: 0.302592, Val Loss: 0.299818 +2025-07-04 12:36:21,061 - INFO - New best model saved with Val Loss: 0.299818 +2025-07-04 12:36:23,306 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:23,320 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:23,320 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:23,320 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:23,320 - INFO - After Normalization*************************************** +2025-07-04 12:36:23,320 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 12:36:23,622 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:23,622 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:23,623 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:23,623 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:23,623 - INFO - After Normalization*************************************** +2025-07-04 12:36:23,623 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:23,911 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:23,911 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:23,912 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:23,912 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:23,912 - INFO - After Normalization*************************************** +2025-07-04 12:36:23,912 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:27,096 - INFO - Epoch 120/150 - Train Loss: 0.294459, Val Loss: 0.300232 +2025-07-04 12:36:29,461 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:29,474 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:29,474 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:29,475 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:29,475 - INFO - After Normalization*************************************** +2025-07-04 12:36:29,475 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:29,783 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:29,783 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:29,784 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 12:36:29,784 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:29,784 - INFO - After Normalization*************************************** +2025-07-04 12:36:29,784 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:30,072 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:30,072 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:30,072 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:30,072 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:30,073 - INFO - After Normalization*************************************** +2025-07-04 12:36:30,073 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:33,249 - INFO - Epoch 121/150 - Train Loss: 0.292162, Val Loss: 0.299877 +2025-07-04 12:36:35,482 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:35,495 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:35,496 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:35,496 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:35,496 - INFO - After Normalization*************************************** +2025-07-04 12:36:35,496 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:35,805 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:35,805 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:35,805 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:35,805 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:35,805 - INFO - After Normalization*************************************** +2025-07-04 
12:36:35,806 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:36,094 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:36,094 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:36,094 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:36,094 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:36,094 - INFO - After Normalization*************************************** +2025-07-04 12:36:36,094 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:39,291 - INFO - Epoch 122/150 - Train Loss: 0.293628, Val Loss: 0.300110 +2025-07-04 12:36:41,567 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:41,579 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:41,579 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:41,580 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:41,580 - INFO - After Normalization*************************************** +2025-07-04 12:36:41,580 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:41,884 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:41,884 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:41,884 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:41,884 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:41,884 - INFO - After Normalization*************************************** +2025-07-04 12:36:41,884 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:42,173 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:42,173 - INFO - (device(type='cpu'), device(type='cpu')) 
+2025-07-04 12:36:42,173 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:42,173 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:42,173 - INFO - After Normalization*************************************** +2025-07-04 12:36:42,173 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:45,421 - INFO - Epoch 123/150 - Train Loss: 0.290856, Val Loss: 0.300606 +2025-07-04 12:36:47,684 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:47,699 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:47,699 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:47,699 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:47,699 - INFO - After Normalization*************************************** +2025-07-04 12:36:47,699 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:48,015 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:48,015 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:48,016 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:48,016 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:48,016 - INFO - After Normalization*************************************** +2025-07-04 12:36:48,016 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:48,309 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:48,309 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:48,309 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:48,309 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:48,309 - INFO - After 
Normalization*************************************** +2025-07-04 12:36:48,309 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:51,520 - INFO - Epoch 124/150 - Train Loss: 0.294557, Val Loss: 0.300868 +2025-07-04 12:36:53,776 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:53,789 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:53,789 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:53,789 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:53,789 - INFO - After Normalization*************************************** +2025-07-04 12:36:53,789 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:54,097 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:54,097 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:54,097 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:54,097 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:54,097 - INFO - After Normalization*************************************** +2025-07-04 12:36:54,097 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:54,389 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:54,389 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:54,390 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:54,390 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:54,390 - INFO - After Normalization*************************************** +2025-07-04 12:36:54,390 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:57,571 - INFO - Epoch 125/150 - Train Loss: 0.293174, Val Loss: 0.300792 +2025-07-04 
12:36:59,819 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:36:59,834 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:36:59,834 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:36:59,834 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:36:59,834 - INFO - After Normalization*************************************** +2025-07-04 12:36:59,834 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:00,141 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:00,142 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:00,142 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:00,142 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:00,142 - INFO - After Normalization*************************************** +2025-07-04 12:37:00,142 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:00,435 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:00,435 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:00,435 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:00,435 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:00,435 - INFO - After Normalization*************************************** +2025-07-04 12:37:00,436 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:03,620 - INFO - Epoch 126/150 - Train Loss: 0.293391, Val Loss: 0.302475 +2025-07-04 12:37:05,880 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:05,894 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:05,894 - INFO - After .to(local_rank)*************************************** 
+2025-07-04 12:37:05,894 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:05,894 - INFO - After Normalization*************************************** +2025-07-04 12:37:05,894 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:06,198 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:06,198 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:06,198 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:06,198 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:06,198 - INFO - After Normalization*************************************** +2025-07-04 12:37:06,198 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:06,490 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:06,490 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:06,491 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:06,491 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:06,491 - INFO - After Normalization*************************************** +2025-07-04 12:37:06,491 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:09,693 - INFO - Epoch 127/150 - Train Loss: 0.290767, Val Loss: 0.301194 +2025-07-04 12:37:11,925 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:11,938 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:11,939 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:11,939 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:11,939 - INFO - After Normalization*************************************** +2025-07-04 12:37:11,939 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 12:37:12,241 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:12,241 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:12,241 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:12,241 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:12,241 - INFO - After Normalization*************************************** +2025-07-04 12:37:12,241 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:12,534 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:12,534 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:12,534 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:12,534 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:12,534 - INFO - After Normalization*************************************** +2025-07-04 12:37:12,534 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:15,739 - INFO - Epoch 128/150 - Train Loss: 0.292764, Val Loss: 0.300578 +2025-07-04 12:37:17,980 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:17,994 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:17,994 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:17,994 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:17,994 - INFO - After Normalization*************************************** +2025-07-04 12:37:17,995 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:18,295 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:18,295 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:18,295 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 12:37:18,295 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:18,295 - INFO - After Normalization*************************************** +2025-07-04 12:37:18,295 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:18,588 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:18,588 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:18,588 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:18,589 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:18,589 - INFO - After Normalization*************************************** +2025-07-04 12:37:18,589 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:21,783 - INFO - Epoch 129/150 - Train Loss: 0.291864, Val Loss: 0.299356 +2025-07-04 12:37:21,799 - INFO - New best model saved with Val Loss: 0.299356 +2025-07-04 12:37:24,068 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:24,082 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:24,083 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:24,083 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:24,083 - INFO - After Normalization*************************************** +2025-07-04 12:37:24,083 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:24,397 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:24,397 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:24,398 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:24,398 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:24,398 - INFO 
- After Normalization*************************************** +2025-07-04 12:37:24,398 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:24,690 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:24,690 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:24,690 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:24,691 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:24,691 - INFO - After Normalization*************************************** +2025-07-04 12:37:24,691 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:27,865 - INFO - Epoch 130/150 - Train Loss: 0.293602, Val Loss: 0.299731 +2025-07-04 12:37:30,221 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:30,235 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:30,235 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:30,235 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:30,235 - INFO - After Normalization*************************************** +2025-07-04 12:37:30,235 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:30,541 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:30,542 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:30,542 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:30,542 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:30,542 - INFO - After Normalization*************************************** +2025-07-04 12:37:30,542 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:30,830 - INFO - before .to(local_rank)*************************************** 
+2025-07-04 12:37:30,831 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:30,831 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:30,831 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:30,831 - INFO - After Normalization*************************************** +2025-07-04 12:37:30,831 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:34,017 - INFO - Epoch 131/150 - Train Loss: 0.288207, Val Loss: 0.299689 +2025-07-04 12:37:36,284 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:36,298 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:36,298 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:36,298 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:36,298 - INFO - After Normalization*************************************** +2025-07-04 12:37:36,298 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:36,615 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:36,615 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:36,616 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:36,616 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:36,616 - INFO - After Normalization*************************************** +2025-07-04 12:37:36,616 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:36,904 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:36,904 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:36,905 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:36,905 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 12:37:36,905 - INFO - After Normalization*************************************** +2025-07-04 12:37:36,905 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:40,098 - INFO - Epoch 132/150 - Train Loss: 0.292099, Val Loss: 0.299264 +2025-07-04 12:37:40,114 - INFO - New best model saved with Val Loss: 0.299264 +2025-07-04 12:37:42,378 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:42,391 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:42,392 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:42,392 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:42,392 - INFO - After Normalization*************************************** +2025-07-04 12:37:42,392 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:42,707 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:42,707 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:42,707 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:42,707 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:42,707 - INFO - After Normalization*************************************** +2025-07-04 12:37:42,707 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:42,995 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:42,996 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:42,996 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:42,996 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:42,996 - INFO - After Normalization*************************************** +2025-07-04 12:37:42,996 - INFO - (device(type='cuda', index=0), 
device(type='cuda', index=0)) +2025-07-04 12:37:46,186 - INFO - Epoch 133/150 - Train Loss: 0.290441, Val Loss: 0.299675 +2025-07-04 12:37:48,438 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:48,453 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:48,453 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:48,453 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:48,453 - INFO - After Normalization*************************************** +2025-07-04 12:37:48,453 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:48,765 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:48,765 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:48,765 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:48,765 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:48,765 - INFO - After Normalization*************************************** +2025-07-04 12:37:48,765 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:49,054 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:49,054 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:49,054 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:49,054 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:49,054 - INFO - After Normalization*************************************** +2025-07-04 12:37:49,054 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:52,247 - INFO - Epoch 134/150 - Train Loss: 0.289598, Val Loss: 0.298882 +2025-07-04 12:37:52,262 - INFO - New best model saved with Val Loss: 0.298882 +2025-07-04 12:37:54,487 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 12:37:54,500 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:54,501 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:54,501 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:54,501 - INFO - After Normalization*************************************** +2025-07-04 12:37:54,501 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:54,811 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:54,811 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:54,811 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:54,811 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:54,811 - INFO - After Normalization*************************************** +2025-07-04 12:37:54,811 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:55,099 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:37:55,100 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:37:55,100 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:37:55,100 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:55,100 - INFO - After Normalization*************************************** +2025-07-04 12:37:55,100 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:37:58,306 - INFO - Epoch 135/150 - Train Loss: 0.291279, Val Loss: 0.298781 +2025-07-04 12:37:58,321 - INFO - New best model saved with Val Loss: 0.298781 +2025-07-04 12:38:00,575 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:00,589 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:00,589 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 12:38:00,589 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:00,589 - INFO - After Normalization*************************************** +2025-07-04 12:38:00,589 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:00,894 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:00,895 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:00,895 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:00,895 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:00,895 - INFO - After Normalization*************************************** +2025-07-04 12:38:00,895 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:01,183 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:01,183 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:01,184 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:01,184 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:01,184 - INFO - After Normalization*************************************** +2025-07-04 12:38:01,184 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:04,369 - INFO - Epoch 136/150 - Train Loss: 0.291402, Val Loss: 0.298846 +2025-07-04 12:38:06,624 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:06,638 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:06,638 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:06,638 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:06,638 - INFO - After Normalization*************************************** +2025-07-04 
12:38:06,638 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:06,953 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:06,953 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:06,953 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:06,953 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:06,953 - INFO - After Normalization*************************************** +2025-07-04 12:38:06,953 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:07,241 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:07,241 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:07,242 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:07,242 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:07,242 - INFO - After Normalization*************************************** +2025-07-04 12:38:07,242 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:10,431 - INFO - Epoch 137/150 - Train Loss: 0.289009, Val Loss: 0.298318 +2025-07-04 12:38:10,447 - INFO - New best model saved with Val Loss: 0.298318 +2025-07-04 12:38:12,711 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:12,725 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:12,725 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:12,725 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:12,725 - INFO - After Normalization*************************************** +2025-07-04 12:38:12,725 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:13,038 - INFO - before .to(local_rank)*************************************** 
+2025-07-04 12:38:13,038 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:13,038 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:13,038 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:13,038 - INFO - After Normalization*************************************** +2025-07-04 12:38:13,038 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:13,327 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:13,327 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:13,327 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:13,327 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:13,327 - INFO - After Normalization*************************************** +2025-07-04 12:38:13,327 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:16,530 - INFO - Epoch 138/150 - Train Loss: 0.290120, Val Loss: 0.299168 +2025-07-04 12:38:18,759 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:18,772 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:18,772 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:18,772 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:18,772 - INFO - After Normalization*************************************** +2025-07-04 12:38:18,772 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:19,082 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:19,082 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:19,083 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:19,083 - INFO - (device(type='cuda', index=0), device(type='cuda', 
index=0)) +2025-07-04 12:38:19,083 - INFO - After Normalization*************************************** +2025-07-04 12:38:19,083 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:19,371 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:19,371 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:19,372 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:19,372 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:19,372 - INFO - After Normalization*************************************** +2025-07-04 12:38:19,372 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:22,579 - INFO - Epoch 139/150 - Train Loss: 0.290107, Val Loss: 0.298576 +2025-07-04 12:38:24,832 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:24,845 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:24,845 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:24,845 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:24,846 - INFO - After Normalization*************************************** +2025-07-04 12:38:24,846 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:25,152 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:25,152 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:25,152 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:25,152 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:25,152 - INFO - After Normalization*************************************** +2025-07-04 12:38:25,153 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:25,441 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 12:38:25,441 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:25,441 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:25,441 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:25,441 - INFO - After Normalization*************************************** +2025-07-04 12:38:25,441 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:28,634 - INFO - Epoch 140/150 - Train Loss: 0.292133, Val Loss: 0.298386 +2025-07-04 12:38:31,000 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:31,014 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:31,014 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:31,014 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:31,014 - INFO - After Normalization*************************************** +2025-07-04 12:38:31,014 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:31,327 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:31,327 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:31,327 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:31,327 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:31,328 - INFO - After Normalization*************************************** +2025-07-04 12:38:31,328 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:31,616 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:31,616 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:31,616 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:31,616 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:31,616 - INFO - After Normalization*************************************** +2025-07-04 12:38:31,616 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:34,813 - INFO - Epoch 141/150 - Train Loss: 0.291022, Val Loss: 0.298982 +2025-07-04 12:38:37,058 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:37,071 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:37,071 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:37,071 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:37,071 - INFO - After Normalization*************************************** +2025-07-04 12:38:37,071 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:37,381 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:37,381 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:37,381 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:37,381 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:37,381 - INFO - After Normalization*************************************** +2025-07-04 12:38:37,381 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:37,669 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:37,670 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:37,670 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:37,670 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:37,670 - INFO - After Normalization*************************************** +2025-07-04 12:38:37,670 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
12:38:40,866 - INFO - Epoch 142/150 - Train Loss: 0.289283, Val Loss: 0.298855 +2025-07-04 12:38:43,133 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:43,147 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:43,147 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:43,147 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:43,147 - INFO - After Normalization*************************************** +2025-07-04 12:38:43,147 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:43,452 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:43,452 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:43,453 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:43,453 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:43,453 - INFO - After Normalization*************************************** +2025-07-04 12:38:43,453 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:43,741 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:43,741 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:43,742 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:43,742 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:43,742 - INFO - After Normalization*************************************** +2025-07-04 12:38:43,742 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:46,953 - INFO - Epoch 143/150 - Train Loss: 0.289318, Val Loss: 0.298431 +2025-07-04 12:38:49,225 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:49,239 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 
12:38:49,239 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:49,240 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:49,240 - INFO - After Normalization*************************************** +2025-07-04 12:38:49,240 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:49,544 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:49,544 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:49,544 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:49,544 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:49,544 - INFO - After Normalization*************************************** +2025-07-04 12:38:49,544 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:49,832 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:49,833 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:49,833 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:49,833 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:49,833 - INFO - After Normalization*************************************** +2025-07-04 12:38:49,833 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:53,021 - INFO - Epoch 144/150 - Train Loss: 0.293004, Val Loss: 0.299563 +2025-07-04 12:38:55,270 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:55,284 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:55,284 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:55,284 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:55,285 - INFO - After 
Normalization*************************************** +2025-07-04 12:38:55,285 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:55,604 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:55,604 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:55,604 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:55,604 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:55,604 - INFO - After Normalization*************************************** +2025-07-04 12:38:55,604 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:55,893 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:38:55,893 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:38:55,893 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:38:55,893 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:55,893 - INFO - After Normalization*************************************** +2025-07-04 12:38:55,893 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:38:59,093 - INFO - Epoch 145/150 - Train Loss: 0.290829, Val Loss: 0.298820 +2025-07-04 12:39:01,345 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:39:01,358 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:39:01,359 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:39:01,359 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:01,359 - INFO - After Normalization*************************************** +2025-07-04 12:39:01,359 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:01,672 - INFO - before .to(local_rank)*************************************** +2025-07-04 
12:39:01,672 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:39:01,672 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:39:01,672 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:01,672 - INFO - After Normalization*************************************** +2025-07-04 12:39:01,672 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:01,965 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:39:01,965 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:39:01,965 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:39:01,965 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:01,965 - INFO - After Normalization*************************************** +2025-07-04 12:39:01,965 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:05,175 - INFO - Epoch 146/150 - Train Loss: 0.293143, Val Loss: 0.299518 +2025-07-04 12:39:07,428 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:39:07,442 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:39:07,442 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:39:07,442 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:07,442 - INFO - After Normalization*************************************** +2025-07-04 12:39:07,442 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:07,750 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:39:07,750 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:39:07,750 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:39:07,750 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) 
+2025-07-04 12:39:07,750 - INFO - After Normalization*************************************** +2025-07-04 12:39:07,750 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:08,043 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:39:08,043 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:39:08,044 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:39:08,044 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:08,044 - INFO - After Normalization*************************************** +2025-07-04 12:39:08,044 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:11,287 - INFO - Epoch 147/150 - Train Loss: 0.291828, Val Loss: 0.298869 +2025-07-04 12:39:13,717 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:39:13,731 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:39:13,731 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:39:13,731 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:13,732 - INFO - After Normalization*************************************** +2025-07-04 12:39:13,732 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:14,037 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:39:14,037 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:39:14,038 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:39:14,038 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:14,038 - INFO - After Normalization*************************************** +2025-07-04 12:39:14,038 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:14,326 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 12:39:14,326 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:39:14,326 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:39:14,327 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:14,327 - INFO - After Normalization*************************************** +2025-07-04 12:39:14,327 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:17,522 - INFO - Epoch 148/150 - Train Loss: 0.290583, Val Loss: 0.299215 +2025-07-04 12:39:19,765 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:39:19,779 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:39:19,779 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:39:19,779 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:19,779 - INFO - After Normalization*************************************** +2025-07-04 12:39:19,779 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:20,084 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:39:20,084 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:39:20,084 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:39:20,084 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:20,084 - INFO - After Normalization*************************************** +2025-07-04 12:39:20,084 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:20,373 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:39:20,373 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:39:20,373 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:39:20,373 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:20,373 - INFO - After Normalization*************************************** +2025-07-04 12:39:20,373 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:23,581 - INFO - Epoch 149/150 - Train Loss: 0.288438, Val Loss: 0.298331 +2025-07-04 12:39:25,834 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:39:25,847 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:39:25,847 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:39:25,847 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:25,847 - INFO - After Normalization*************************************** +2025-07-04 12:39:25,847 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:26,168 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:39:26,168 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:39:26,168 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:39:26,168 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:26,168 - INFO - After Normalization*************************************** +2025-07-04 12:39:26,168 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:26,457 - INFO - before .to(local_rank)*************************************** +2025-07-04 12:39:26,457 - INFO - (device(type='cpu'), device(type='cpu')) +2025-07-04 12:39:26,457 - INFO - After .to(local_rank)*************************************** +2025-07-04 12:39:26,457 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 12:39:26,457 - INFO - After Normalization*************************************** +2025-07-04 12:39:26,457 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
12:39:29,644 - INFO - Epoch 150/150 - Train Loss: 0.290288, Val Loss: 0.298573 +2025-07-04 12:39:29,786 - INFO - Final model saved to experiments/Train_Test/final_model_tmp +2025-07-04 12:39:29,796 - INFO - Testing the final model +2025-07-04 12:39:29,796 - INFO - Testing the best model +2025-07-04 13:51:26,087 - INFO - args.exp_name : Train_Test +2025-07-04 13:51:26,088 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 13:51:26,088 - INFO - Starting training with 1 GPUs +2025-07-04 13:51:29,943 - INFO - Total trainable parameters: 1437705 +2025-07-04 13:51:29,986 - INFO - Data loaded: 3 training batches, 1 validation batches, 1 test batches +2025-07-04 13:51:29,989 - INFO - Staring training for 150 epochs +2025-07-04 13:51:33,818 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:51:33,821 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:51:33,822 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:51:33,822 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:33,832 - INFO - After Normalization*************************************** +2025-07-04 13:51:33,832 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:34,318 - INFO - outputs: cuda:0 +2025-07-04 13:51:34,595 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:51:34,595 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:51:34,595 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 13:51:34,595 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:34,596 - INFO - After Normalization*************************************** +2025-07-04 13:51:34,596 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:34,602 - INFO - outputs: cuda:0 +2025-07-04 13:51:34,885 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:51:34,885 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:51:34,885 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:51:34,885 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:34,885 - INFO - After Normalization*************************************** +2025-07-04 13:51:34,885 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:34,890 - INFO - outputs: cuda:0 +2025-07-04 13:51:38,291 - INFO - Epoch 1/150 - Train Loss: 1.283437, Val Loss: 1.146866 +2025-07-04 13:51:38,313 - INFO - New best model saved with Val Loss: 1.146866 +2025-07-04 13:51:40,594 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:51:40,607 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:51:40,608 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:51:40,608 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:40,608 - INFO - After Normalization*************************************** +2025-07-04 13:51:40,608 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:40,613 - INFO - outputs: cuda:0 +2025-07-04 13:51:40,918 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:51:40,918 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:51:40,918 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:51:40,918 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:40,918 - INFO - After Normalization*************************************** +2025-07-04 13:51:40,918 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:40,923 - INFO - outputs: cuda:0 +2025-07-04 13:51:41,207 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:51:41,207 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:51:41,207 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:51:41,207 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:41,207 - INFO - After Normalization*************************************** +2025-07-04 13:51:41,207 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:41,212 - INFO - outputs: cuda:0 +2025-07-04 13:51:44,431 - INFO - Epoch 2/150 - Train Loss: 1.159811, Val Loss: 1.148012 +2025-07-04 13:51:46,693 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:51:46,707 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:51:46,708 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:51:46,708 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:46,708 - INFO - After Normalization*************************************** +2025-07-04 13:51:46,708 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:46,713 - INFO - outputs: cuda:0 +2025-07-04 13:51:47,010 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:51:47,010 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:51:47,011 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:51:47,011 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:47,011 - INFO - After Normalization*************************************** +2025-07-04 13:51:47,011 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:47,016 - INFO - outputs: cuda:0 +2025-07-04 13:51:47,300 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:51:47,300 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:51:47,300 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:51:47,300 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:47,300 - INFO - After Normalization*************************************** +2025-07-04 13:51:47,300 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:47,305 - INFO - outputs: cuda:0 +2025-07-04 13:51:50,527 - INFO - Epoch 3/150 - Train Loss: 1.015355, Val Loss: 1.148288 +2025-07-04 13:51:52,793 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:51:52,808 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:51:52,809 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:51:52,809 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:52,809 - INFO - After Normalization*************************************** +2025-07-04 13:51:52,809 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:52,814 - INFO - outputs: cuda:0 +2025-07-04 13:51:53,126 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:51:53,126 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:51:53,126 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:51:53,126 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:53,127 - INFO - After Normalization*************************************** +2025-07-04 13:51:53,127 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:53,131 - INFO - outputs: cuda:0 +2025-07-04 13:51:53,415 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:51:53,415 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:51:53,415 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:51:53,415 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:53,416 - INFO - After Normalization*************************************** +2025-07-04 13:51:53,416 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:53,420 - INFO - outputs: cuda:0 +2025-07-04 13:51:56,643 - INFO - Epoch 4/150 - Train Loss: 0.916734, Val Loss: 1.249089 +2025-07-04 13:51:58,904 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:51:58,918 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:51:58,918 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:51:58,918 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:58,919 - INFO - After Normalization*************************************** +2025-07-04 13:51:58,919 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:58,923 - INFO - outputs: cuda:0 +2025-07-04 13:51:59,226 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:51:59,226 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:51:59,227 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:51:59,227 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:59,227 - INFO - After Normalization*************************************** +2025-07-04 13:51:59,227 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:59,231 - INFO - outputs: cuda:0 +2025-07-04 13:51:59,515 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:51:59,515 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:51:59,516 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:51:59,516 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:59,516 - INFO - After Normalization*************************************** +2025-07-04 13:51:59,516 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:51:59,520 - INFO - outputs: cuda:0 +2025-07-04 13:52:02,719 - INFO - Epoch 5/150 - Train Loss: 0.839702, Val Loss: 1.442995 +2025-07-04 13:52:04,972 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:04,986 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:04,986 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:04,987 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:04,987 - INFO - After Normalization*************************************** +2025-07-04 13:52:04,987 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:04,992 - INFO - outputs: cuda:0 +2025-07-04 13:52:05,291 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:05,291 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:05,292 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:05,292 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:05,292 - INFO - After Normalization*************************************** +2025-07-04 13:52:05,292 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:05,296 - INFO - outputs: cuda:0 +2025-07-04 13:52:05,585 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:05,585 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:05,585 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:05,585 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:05,585 - INFO - After Normalization*************************************** +2025-07-04 13:52:05,586 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:05,590 - INFO - outputs: cuda:0 +2025-07-04 13:52:08,791 - INFO - Epoch 6/150 - Train Loss: 0.757819, Val Loss: 1.361055 +2025-07-04 13:52:11,056 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:11,069 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:11,070 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:11,070 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:11,070 - INFO - After Normalization*************************************** +2025-07-04 13:52:11,070 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:11,075 - INFO - outputs: cuda:0 +2025-07-04 13:52:11,380 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:11,380 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:11,380 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:11,380 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:11,380 - INFO - After Normalization*************************************** +2025-07-04 13:52:11,380 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:11,385 - INFO - outputs: cuda:0 +2025-07-04 13:52:11,672 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:11,673 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:11,673 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:11,673 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:11,673 - INFO - After Normalization*************************************** +2025-07-04 13:52:11,673 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:11,678 - INFO - outputs: cuda:0 +2025-07-04 13:52:14,878 - INFO - Epoch 7/150 - Train Loss: 0.661384, Val Loss: 1.379884 +2025-07-04 13:52:17,135 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:17,149 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:17,149 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:17,149 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:17,149 - INFO - After Normalization*************************************** +2025-07-04 13:52:17,150 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:17,155 - INFO - outputs: cuda:0 +2025-07-04 13:52:17,465 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:17,466 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:17,466 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:17,466 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:17,466 - INFO - After Normalization*************************************** +2025-07-04 13:52:17,466 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:17,471 - INFO - outputs: cuda:0 +2025-07-04 13:52:17,759 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:17,759 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:17,760 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:17,760 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:17,760 - INFO - After Normalization*************************************** +2025-07-04 13:52:17,760 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:17,764 - INFO - outputs: cuda:0 +2025-07-04 13:52:20,955 - INFO - Epoch 8/150 - Train Loss: 0.608690, Val Loss: 1.595568 +2025-07-04 13:52:23,206 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:23,219 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:23,220 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:23,220 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:23,220 - INFO - After Normalization*************************************** +2025-07-04 13:52:23,220 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:23,225 - INFO - outputs: cuda:0 +2025-07-04 13:52:23,529 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:23,529 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:23,529 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:23,529 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:23,529 - INFO - After Normalization*************************************** +2025-07-04 13:52:23,529 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:23,534 - INFO - outputs: cuda:0 +2025-07-04 13:52:23,822 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:23,822 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:23,822 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:23,822 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:23,822 - INFO - After Normalization*************************************** +2025-07-04 13:52:23,823 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:23,827 - INFO - outputs: cuda:0 +2025-07-04 13:52:27,044 - INFO - Epoch 9/150 - Train Loss: 0.560139, Val Loss: 1.838627 +2025-07-04 13:52:29,299 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:29,313 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:29,314 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:29,314 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:29,314 - INFO - After Normalization*************************************** +2025-07-04 13:52:29,314 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:29,319 - INFO - outputs: cuda:0 +2025-07-04 13:52:29,617 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:29,617 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:29,617 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:29,617 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:29,617 - INFO - After Normalization*************************************** +2025-07-04 13:52:29,617 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:29,622 - INFO - outputs: cuda:0 +2025-07-04 13:52:29,910 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:29,910 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:29,911 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:29,911 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:29,911 - INFO - After Normalization*************************************** +2025-07-04 13:52:29,911 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:29,915 - INFO - outputs: cuda:0 +2025-07-04 13:52:33,108 - INFO - Epoch 10/150 - Train Loss: 0.526667, Val Loss: 1.521763 +2025-07-04 13:52:35,522 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:35,536 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:35,536 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:35,536 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:35,536 - INFO - After Normalization*************************************** +2025-07-04 13:52:35,536 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:35,541 - INFO - outputs: cuda:0 +2025-07-04 13:52:35,843 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:35,843 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:35,844 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:35,844 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:35,844 - INFO - After Normalization*************************************** +2025-07-04 13:52:35,844 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:35,849 - INFO - outputs: cuda:0 +2025-07-04 13:52:36,132 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:36,132 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:36,133 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:36,133 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:36,133 - INFO - After Normalization*************************************** +2025-07-04 13:52:36,133 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:36,137 - INFO - outputs: cuda:0 +2025-07-04 13:52:39,321 - INFO - Epoch 11/150 - Train Loss: 0.494009, Val Loss: 1.437564 +2025-07-04 13:52:41,588 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:41,602 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:41,603 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:41,603 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:41,603 - INFO - After Normalization*************************************** +2025-07-04 13:52:41,603 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:41,608 - INFO - outputs: cuda:0 +2025-07-04 13:52:41,907 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:41,907 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:41,908 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:41,908 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:41,909 - INFO - After Normalization*************************************** +2025-07-04 13:52:41,909 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:41,913 - INFO - outputs: cuda:0 +2025-07-04 13:52:42,197 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:42,197 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:42,197 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:42,197 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:42,198 - INFO - After Normalization*************************************** +2025-07-04 13:52:42,198 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:42,202 - INFO - outputs: cuda:0 +2025-07-04 13:52:45,408 - INFO - Epoch 12/150 - Train Loss: 0.474216, Val Loss: 1.348367 +2025-07-04 13:52:47,682 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:47,696 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:47,697 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:47,697 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:47,697 - INFO - After Normalization*************************************** +2025-07-04 13:52:47,697 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:47,702 - INFO - outputs: cuda:0 +2025-07-04 13:52:48,016 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:48,016 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:48,016 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:48,016 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:48,016 - INFO - After Normalization*************************************** +2025-07-04 13:52:48,016 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:48,021 - INFO - outputs: cuda:0 +2025-07-04 13:52:48,305 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:48,305 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:48,305 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:48,305 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:48,305 - INFO - After Normalization*************************************** +2025-07-04 13:52:48,305 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:48,310 - INFO - outputs: cuda:0 +2025-07-04 13:52:51,525 - INFO - Epoch 13/150 - Train Loss: 0.457834, Val Loss: 1.089559 +2025-07-04 13:52:51,541 - INFO - New best model saved with Val Loss: 1.089559 +2025-07-04 13:52:53,785 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:53,799 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:53,799 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:53,799 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:53,799 - INFO - After Normalization*************************************** +2025-07-04 13:52:53,800 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:53,804 - INFO - outputs: cuda:0 +2025-07-04 13:52:54,110 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 13:52:54,111 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:54,112 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:54,112 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:54,112 - INFO - After Normalization*************************************** +2025-07-04 13:52:54,112 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:54,117 - INFO - outputs: cuda:0 +2025-07-04 13:52:54,400 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:54,400 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:54,401 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:54,401 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:54,401 - INFO - After Normalization*************************************** +2025-07-04 13:52:54,401 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:54,405 - INFO - outputs: cuda:0 +2025-07-04 13:52:57,608 - INFO - Epoch 14/150 - Train Loss: 0.455360, Val Loss: 0.832426 +2025-07-04 13:52:57,625 - INFO - New best model saved with Val Loss: 0.832426 +2025-07-04 13:52:59,886 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:52:59,900 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:52:59,900 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:52:59,901 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:52:59,901 - INFO - After Normalization*************************************** +2025-07-04 13:52:59,901 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
13:52:59,905 - INFO - outputs: cuda:0 +2025-07-04 13:53:00,202 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:00,202 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:00,203 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:00,203 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:00,203 - INFO - After Normalization*************************************** +2025-07-04 13:53:00,203 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:00,208 - INFO - outputs: cuda:0 +2025-07-04 13:53:00,492 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:00,492 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:00,492 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:00,492 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:00,492 - INFO - After Normalization*************************************** +2025-07-04 13:53:00,492 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:00,497 - INFO - outputs: cuda:0 +2025-07-04 13:53:03,703 - INFO - Epoch 15/150 - Train Loss: 0.447783, Val Loss: 0.654857 +2025-07-04 13:53:03,717 - INFO - New best model saved with Val Loss: 0.654857 +2025-07-04 13:53:05,989 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:06,003 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:06,003 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:06,003 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:06,003 - INFO - After Normalization*************************************** +2025-07-04 13:53:06,003 - INFO - data and 
targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:06,008 - INFO - outputs: cuda:0 +2025-07-04 13:53:06,315 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:06,316 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:06,317 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:06,317 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:06,317 - INFO - After Normalization*************************************** +2025-07-04 13:53:06,317 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:06,322 - INFO - outputs: cuda:0 +2025-07-04 13:53:06,606 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:06,606 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:06,606 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:06,606 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:06,606 - INFO - After Normalization*************************************** +2025-07-04 13:53:06,606 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:06,611 - INFO - outputs: cuda:0 +2025-07-04 13:53:09,810 - INFO - Epoch 16/150 - Train Loss: 0.446389, Val Loss: 0.548675 +2025-07-04 13:53:09,825 - INFO - New best model saved with Val Loss: 0.548675 +2025-07-04 13:53:12,089 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:12,104 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:12,104 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:12,104 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:12,104 - INFO - After 
Normalization*************************************** +2025-07-04 13:53:12,104 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:12,109 - INFO - outputs: cuda:0 +2025-07-04 13:53:12,409 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:12,409 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:12,409 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:12,409 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:12,409 - INFO - After Normalization*************************************** +2025-07-04 13:53:12,409 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:12,414 - INFO - outputs: cuda:0 +2025-07-04 13:53:12,698 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:12,698 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:12,698 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:12,699 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:12,699 - INFO - After Normalization*************************************** +2025-07-04 13:53:12,699 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:12,703 - INFO - outputs: cuda:0 +2025-07-04 13:53:15,896 - INFO - Epoch 17/150 - Train Loss: 0.440610, Val Loss: 0.495317 +2025-07-04 13:53:15,911 - INFO - New best model saved with Val Loss: 0.495317 +2025-07-04 13:53:18,184 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:18,197 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:18,198 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:18,198 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 13:53:18,198 - INFO - After Normalization*************************************** +2025-07-04 13:53:18,198 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:18,203 - INFO - outputs: cuda:0 +2025-07-04 13:53:18,511 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:18,511 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:18,512 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:18,512 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:18,512 - INFO - After Normalization*************************************** +2025-07-04 13:53:18,512 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:18,517 - INFO - outputs: cuda:0 +2025-07-04 13:53:18,801 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:18,801 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:18,801 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:18,802 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:18,802 - INFO - After Normalization*************************************** +2025-07-04 13:53:18,802 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:18,806 - INFO - outputs: cuda:0 +2025-07-04 13:53:22,030 - INFO - Epoch 18/150 - Train Loss: 0.438833, Val Loss: 0.464275 +2025-07-04 13:53:22,045 - INFO - New best model saved with Val Loss: 0.464275 +2025-07-04 13:53:24,302 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:24,316 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:24,317 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 13:53:24,317 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:24,317 - INFO - After Normalization*************************************** +2025-07-04 13:53:24,317 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:24,322 - INFO - outputs: cuda:0 +2025-07-04 13:53:24,625 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:24,625 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:24,625 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:24,625 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:24,625 - INFO - After Normalization*************************************** +2025-07-04 13:53:24,625 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:24,630 - INFO - outputs: cuda:0 +2025-07-04 13:53:24,914 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:24,914 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:24,914 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:24,914 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:24,914 - INFO - After Normalization*************************************** +2025-07-04 13:53:24,914 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:24,919 - INFO - outputs: cuda:0 +2025-07-04 13:53:28,130 - INFO - Epoch 19/150 - Train Loss: 0.439969, Val Loss: 0.443737 +2025-07-04 13:53:28,146 - INFO - New best model saved with Val Loss: 0.443737 +2025-07-04 13:53:30,419 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:30,433 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:30,433 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:30,433 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:30,434 - INFO - After Normalization*************************************** +2025-07-04 13:53:30,434 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:30,438 - INFO - outputs: cuda:0 +2025-07-04 13:53:30,733 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:30,733 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:30,734 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:30,734 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:30,734 - INFO - After Normalization*************************************** +2025-07-04 13:53:30,734 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:30,739 - INFO - outputs: cuda:0 +2025-07-04 13:53:31,023 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:31,023 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:31,023 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:31,023 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:31,024 - INFO - After Normalization*************************************** +2025-07-04 13:53:31,024 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:31,028 - INFO - outputs: cuda:0 +2025-07-04 13:53:34,240 - INFO - Epoch 20/150 - Train Loss: 0.431883, Val Loss: 0.428062 +2025-07-04 13:53:34,258 - INFO - New best model saved with Val Loss: 0.428062 +2025-07-04 13:53:36,639 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 13:53:36,653 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:36,653 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:36,653 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:36,653 - INFO - After Normalization*************************************** +2025-07-04 13:53:36,653 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:36,658 - INFO - outputs: cuda:0 +2025-07-04 13:53:36,954 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:36,954 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:36,954 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:36,954 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:36,954 - INFO - After Normalization*************************************** +2025-07-04 13:53:36,954 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:36,959 - INFO - outputs: cuda:0 +2025-07-04 13:53:37,247 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:37,247 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:37,247 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:37,247 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:37,248 - INFO - After Normalization*************************************** +2025-07-04 13:53:37,248 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:37,252 - INFO - outputs: cuda:0 +2025-07-04 13:53:40,434 - INFO - Epoch 21/150 - Train Loss: 0.431155, Val Loss: 0.423547 +2025-07-04 13:53:40,449 - INFO - New best model 
saved with Val Loss: 0.423547 +2025-07-04 13:53:42,721 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:42,734 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:42,735 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:42,735 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:42,735 - INFO - After Normalization*************************************** +2025-07-04 13:53:42,735 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:42,740 - INFO - outputs: cuda:0 +2025-07-04 13:53:43,051 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:43,051 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:43,052 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:43,052 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:43,052 - INFO - After Normalization*************************************** +2025-07-04 13:53:43,052 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:43,057 - INFO - outputs: cuda:0 +2025-07-04 13:53:43,344 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:43,344 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:43,345 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:43,345 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:43,345 - INFO - After Normalization*************************************** +2025-07-04 13:53:43,345 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:43,349 - INFO - outputs: cuda:0 +2025-07-04 13:53:46,536 - INFO - Epoch 22/150 - Train Loss: 
0.428144, Val Loss: 0.426146 +2025-07-04 13:53:48,810 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:48,824 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:48,824 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:48,824 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:48,824 - INFO - After Normalization*************************************** +2025-07-04 13:53:48,825 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:48,829 - INFO - outputs: cuda:0 +2025-07-04 13:53:49,133 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:49,133 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:49,133 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:49,133 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:49,133 - INFO - After Normalization*************************************** +2025-07-04 13:53:49,133 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:49,138 - INFO - outputs: cuda:0 +2025-07-04 13:53:49,426 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:49,427 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:49,427 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:49,427 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:49,427 - INFO - After Normalization*************************************** +2025-07-04 13:53:49,427 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:49,432 - INFO - outputs: cuda:0 +2025-07-04 13:53:52,637 - INFO - Epoch 23/150 - Train Loss: 
0.425435, Val Loss: 0.427524 +2025-07-04 13:53:54,898 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:54,912 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:54,913 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:54,913 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:54,913 - INFO - After Normalization*************************************** +2025-07-04 13:53:54,913 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:54,918 - INFO - outputs: cuda:0 +2025-07-04 13:53:55,215 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:55,216 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:55,216 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:55,216 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:55,217 - INFO - After Normalization*************************************** +2025-07-04 13:53:55,217 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:55,222 - INFO - outputs: cuda:0 +2025-07-04 13:53:55,510 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:53:55,510 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:53:55,510 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:53:55,511 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:55,511 - INFO - After Normalization*************************************** +2025-07-04 13:53:55,511 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:53:55,515 - INFO - outputs: cuda:0 +2025-07-04 13:53:58,710 - INFO - Epoch 24/150 - Train Loss: 
0.422210, Val Loss: 0.422279 +2025-07-04 13:53:58,724 - INFO - New best model saved with Val Loss: 0.422279 +2025-07-04 13:54:00,958 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:00,972 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:00,972 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:00,972 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:00,972 - INFO - After Normalization*************************************** +2025-07-04 13:54:00,972 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:00,977 - INFO - outputs: cuda:0 +2025-07-04 13:54:01,273 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:01,273 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:01,273 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:01,285 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:01,296 - INFO - After Normalization*************************************** +2025-07-04 13:54:01,307 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:01,322 - INFO - outputs: cuda:0 +2025-07-04 13:54:01,610 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:01,610 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:01,610 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:01,610 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:01,610 - INFO - After Normalization*************************************** +2025-07-04 13:54:01,610 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:01,615 - INFO - 
outputs: cuda:0 +2025-07-04 13:54:04,815 - INFO - Epoch 25/150 - Train Loss: 0.423424, Val Loss: 0.427988 +2025-07-04 13:54:07,071 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:07,084 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:07,085 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:07,085 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:07,085 - INFO - After Normalization*************************************** +2025-07-04 13:54:07,085 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:07,090 - INFO - outputs: cuda:0 +2025-07-04 13:54:07,398 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:07,398 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:07,399 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:07,399 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:07,399 - INFO - After Normalization*************************************** +2025-07-04 13:54:07,400 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:07,404 - INFO - outputs: cuda:0 +2025-07-04 13:54:07,688 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:07,688 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:07,688 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:07,688 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:07,689 - INFO - After Normalization*************************************** +2025-07-04 13:54:07,689 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:07,693 - INFO - 
outputs: cuda:0 +2025-07-04 13:54:10,897 - INFO - Epoch 26/150 - Train Loss: 0.416589, Val Loss: 0.438708 +2025-07-04 13:54:13,166 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:13,179 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:13,180 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:13,180 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:13,180 - INFO - After Normalization*************************************** +2025-07-04 13:54:13,180 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:13,185 - INFO - outputs: cuda:0 +2025-07-04 13:54:13,488 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:13,488 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:13,488 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:13,488 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:13,488 - INFO - After Normalization*************************************** +2025-07-04 13:54:13,489 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:13,493 - INFO - outputs: cuda:0 +2025-07-04 13:54:13,777 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:13,777 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:13,777 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:13,777 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:13,778 - INFO - After Normalization*************************************** +2025-07-04 13:54:13,778 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:13,782 - INFO - 
outputs: cuda:0 +2025-07-04 13:54:16,995 - INFO - Epoch 27/150 - Train Loss: 0.415543, Val Loss: 0.429039 +2025-07-04 13:54:19,253 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:19,266 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:19,266 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:19,266 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:19,267 - INFO - After Normalization*************************************** +2025-07-04 13:54:19,267 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:19,271 - INFO - outputs: cuda:0 +2025-07-04 13:54:19,585 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:19,585 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:19,586 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:19,586 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:19,586 - INFO - After Normalization*************************************** +2025-07-04 13:54:19,586 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:19,591 - INFO - outputs: cuda:0 +2025-07-04 13:54:19,875 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:19,875 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:19,875 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:19,876 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:19,876 - INFO - After Normalization*************************************** +2025-07-04 13:54:19,876 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:19,880 - INFO - 
outputs: cuda:0 +2025-07-04 13:54:23,067 - INFO - Epoch 28/150 - Train Loss: 0.413719, Val Loss: 0.420002 +2025-07-04 13:54:23,085 - INFO - New best model saved with Val Loss: 0.420002 +2025-07-04 13:54:25,337 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:25,350 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:25,351 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:25,351 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:25,351 - INFO - After Normalization*************************************** +2025-07-04 13:54:25,351 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:25,356 - INFO - outputs: cuda:0 +2025-07-04 13:54:25,668 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:25,668 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:25,668 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:25,668 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:25,668 - INFO - After Normalization*************************************** +2025-07-04 13:54:25,668 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:25,673 - INFO - outputs: cuda:0 +2025-07-04 13:54:25,957 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:25,957 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:25,957 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:25,957 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:25,957 - INFO - After Normalization*************************************** +2025-07-04 13:54:25,957 - INFO - data and targets:(device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 13:54:25,962 - INFO - outputs: cuda:0 +2025-07-04 13:54:29,178 - INFO - Epoch 29/150 - Train Loss: 0.410406, Val Loss: 0.413966 +2025-07-04 13:54:29,193 - INFO - New best model saved with Val Loss: 0.413966 +2025-07-04 13:54:31,433 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:31,446 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:31,447 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:31,447 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:31,447 - INFO - After Normalization*************************************** +2025-07-04 13:54:31,447 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:31,452 - INFO - outputs: cuda:0 +2025-07-04 13:54:31,756 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:31,756 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:31,757 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:31,757 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:31,757 - INFO - After Normalization*************************************** +2025-07-04 13:54:31,757 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:31,762 - INFO - outputs: cuda:0 +2025-07-04 13:54:32,046 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:32,046 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:32,046 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:32,046 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:32,046 - INFO - After Normalization*************************************** 
+2025-07-04 13:54:32,047 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:32,051 - INFO - outputs: cuda:0 +2025-07-04 13:54:35,254 - INFO - Epoch 30/150 - Train Loss: 0.409573, Val Loss: 0.411146 +2025-07-04 13:54:35,269 - INFO - New best model saved with Val Loss: 0.411146 +2025-07-04 13:54:37,646 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:37,659 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:37,660 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:37,660 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:37,660 - INFO - After Normalization*************************************** +2025-07-04 13:54:37,660 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:37,665 - INFO - outputs: cuda:0 +2025-07-04 13:54:37,968 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:37,968 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:37,968 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:37,968 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:37,968 - INFO - After Normalization*************************************** +2025-07-04 13:54:37,969 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:37,973 - INFO - outputs: cuda:0 +2025-07-04 13:54:38,257 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:38,257 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:38,257 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:38,258 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
13:54:38,258 - INFO - After Normalization*************************************** +2025-07-04 13:54:38,258 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:38,262 - INFO - outputs: cuda:0 +2025-07-04 13:54:41,473 - INFO - Epoch 31/150 - Train Loss: 0.407763, Val Loss: 0.405612 +2025-07-04 13:54:41,488 - INFO - New best model saved with Val Loss: 0.405612 +2025-07-04 13:54:43,744 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:43,758 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:43,758 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:43,758 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:43,759 - INFO - After Normalization*************************************** +2025-07-04 13:54:43,759 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:43,763 - INFO - outputs: cuda:0 +2025-07-04 13:54:44,059 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:44,059 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:44,059 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:44,060 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:44,060 - INFO - After Normalization*************************************** +2025-07-04 13:54:44,060 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:44,065 - INFO - outputs: cuda:0 +2025-07-04 13:54:44,349 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:44,349 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:44,349 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:44,349 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:44,349 - INFO - After Normalization*************************************** +2025-07-04 13:54:44,350 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:44,354 - INFO - outputs: cuda:0 +2025-07-04 13:54:47,553 - INFO - Epoch 32/150 - Train Loss: 0.407623, Val Loss: 0.406176 +2025-07-04 13:54:49,818 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:49,831 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:49,831 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:49,831 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:49,831 - INFO - After Normalization*************************************** +2025-07-04 13:54:49,831 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:49,836 - INFO - outputs: cuda:0 +2025-07-04 13:54:50,148 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:50,149 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:50,149 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:50,149 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:50,149 - INFO - After Normalization*************************************** +2025-07-04 13:54:50,149 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:50,154 - INFO - outputs: cuda:0 +2025-07-04 13:54:50,437 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:50,438 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:50,438 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:50,438 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:50,438 - INFO - After Normalization*************************************** +2025-07-04 13:54:50,438 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:50,443 - INFO - outputs: cuda:0 +2025-07-04 13:54:53,636 - INFO - Epoch 33/150 - Train Loss: 0.403604, Val Loss: 0.407962 +2025-07-04 13:54:55,892 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:55,906 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:55,906 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:55,906 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:55,907 - INFO - After Normalization*************************************** +2025-07-04 13:54:55,907 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:55,911 - INFO - outputs: cuda:0 +2025-07-04 13:54:56,217 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:56,217 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:56,217 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:56,218 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:56,218 - INFO - After Normalization*************************************** +2025-07-04 13:54:56,218 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:56,222 - INFO - outputs: cuda:0 +2025-07-04 13:54:56,506 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:54:56,507 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:54:56,507 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:54:56,507 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:56,507 - INFO - After Normalization*************************************** +2025-07-04 13:54:56,507 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:54:56,512 - INFO - outputs: cuda:0 +2025-07-04 13:54:59,721 - INFO - Epoch 34/150 - Train Loss: 0.399802, Val Loss: 0.401497 +2025-07-04 13:54:59,736 - INFO - New best model saved with Val Loss: 0.401497 +2025-07-04 13:55:02,003 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:02,016 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:02,017 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:02,017 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:02,017 - INFO - After Normalization*************************************** +2025-07-04 13:55:02,017 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:02,022 - INFO - outputs: cuda:0 +2025-07-04 13:55:02,319 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:02,319 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:02,320 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:02,320 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:02,320 - INFO - After Normalization*************************************** +2025-07-04 13:55:02,320 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:02,325 - INFO - outputs: cuda:0 +2025-07-04 13:55:02,608 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:02,608 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:02,609 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 13:55:02,609 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:02,609 - INFO - After Normalization*************************************** +2025-07-04 13:55:02,609 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:02,613 - INFO - outputs: cuda:0 +2025-07-04 13:55:05,818 - INFO - Epoch 35/150 - Train Loss: 0.400421, Val Loss: 0.400646 +2025-07-04 13:55:05,833 - INFO - New best model saved with Val Loss: 0.400646 +2025-07-04 13:55:08,088 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:08,102 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:08,102 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:08,102 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:08,102 - INFO - After Normalization*************************************** +2025-07-04 13:55:08,102 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:08,107 - INFO - outputs: cuda:0 +2025-07-04 13:55:08,421 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:08,421 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:08,421 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:08,421 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:08,421 - INFO - After Normalization*************************************** +2025-07-04 13:55:08,422 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:08,426 - INFO - outputs: cuda:0 +2025-07-04 13:55:08,714 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:08,715 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:08,715 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:08,715 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:08,716 - INFO - After Normalization*************************************** +2025-07-04 13:55:08,716 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:08,720 - INFO - outputs: cuda:0 +2025-07-04 13:55:11,928 - INFO - Epoch 36/150 - Train Loss: 0.395775, Val Loss: 0.403946 +2025-07-04 13:55:14,179 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:14,192 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:14,193 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:14,193 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:14,193 - INFO - After Normalization*************************************** +2025-07-04 13:55:14,193 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:14,198 - INFO - outputs: cuda:0 +2025-07-04 13:55:14,503 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:14,503 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:14,504 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:14,504 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:14,504 - INFO - After Normalization*************************************** +2025-07-04 13:55:14,504 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:14,508 - INFO - outputs: cuda:0 +2025-07-04 13:55:14,795 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:14,795 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:14,796 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:14,796 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:14,796 - INFO - After Normalization*************************************** +2025-07-04 13:55:14,796 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:14,800 - INFO - outputs: cuda:0 +2025-07-04 13:55:17,996 - INFO - Epoch 37/150 - Train Loss: 0.395949, Val Loss: 0.401222 +2025-07-04 13:55:20,248 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:20,262 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:20,262 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:20,262 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:20,262 - INFO - After Normalization*************************************** +2025-07-04 13:55:20,262 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:20,267 - INFO - outputs: cuda:0 +2025-07-04 13:55:20,569 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:20,569 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:20,570 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:20,570 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:20,570 - INFO - After Normalization*************************************** +2025-07-04 13:55:20,570 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:20,575 - INFO - outputs: cuda:0 +2025-07-04 13:55:20,862 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:20,862 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:20,862 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:20,863 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:20,863 - INFO - After Normalization*************************************** +2025-07-04 13:55:20,863 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:20,868 - INFO - outputs: cuda:0 +2025-07-04 13:55:24,067 - INFO - Epoch 38/150 - Train Loss: 0.393724, Val Loss: 0.398120 +2025-07-04 13:55:24,083 - INFO - New best model saved with Val Loss: 0.398120 +2025-07-04 13:55:26,355 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:26,368 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:26,369 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:26,369 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:26,369 - INFO - After Normalization*************************************** +2025-07-04 13:55:26,369 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:26,373 - INFO - outputs: cuda:0 +2025-07-04 13:55:26,677 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:26,677 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:26,677 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:26,677 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:26,677 - INFO - After Normalization*************************************** +2025-07-04 13:55:26,677 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:26,682 - INFO - outputs: cuda:0 +2025-07-04 13:55:26,969 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 13:55:26,969 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:26,969 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:26,969 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:26,969 - INFO - After Normalization*************************************** +2025-07-04 13:55:26,969 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:26,974 - INFO - outputs: cuda:0 +2025-07-04 13:55:30,194 - INFO - Epoch 39/150 - Train Loss: 0.390039, Val Loss: 0.399025 +2025-07-04 13:55:32,462 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:32,477 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:32,477 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:32,477 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:32,477 - INFO - After Normalization*************************************** +2025-07-04 13:55:32,477 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:32,482 - INFO - outputs: cuda:0 +2025-07-04 13:55:32,791 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:32,791 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:32,792 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:32,792 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:32,792 - INFO - After Normalization*************************************** +2025-07-04 13:55:32,792 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:32,797 - INFO - outputs: cuda:0 +2025-07-04 13:55:33,080 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 13:55:33,080 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:33,081 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:33,081 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:33,082 - INFO - After Normalization*************************************** +2025-07-04 13:55:33,082 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:33,086 - INFO - outputs: cuda:0 +2025-07-04 13:55:36,289 - INFO - Epoch 40/150 - Train Loss: 0.391684, Val Loss: 0.390313 +2025-07-04 13:55:36,305 - INFO - New best model saved with Val Loss: 0.390313 +2025-07-04 13:55:38,663 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:38,678 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:38,678 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:38,678 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:38,678 - INFO - After Normalization*************************************** +2025-07-04 13:55:38,678 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:38,684 - INFO - outputs: cuda:0 +2025-07-04 13:55:38,996 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:38,997 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:38,997 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:38,997 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:38,997 - INFO - After Normalization*************************************** +2025-07-04 13:55:38,997 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
13:55:39,002 - INFO - outputs: cuda:0 +2025-07-04 13:55:39,286 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:39,286 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:39,286 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:39,286 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:39,286 - INFO - After Normalization*************************************** +2025-07-04 13:55:39,286 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:39,291 - INFO - outputs: cuda:0 +2025-07-04 13:55:42,498 - INFO - Epoch 41/150 - Train Loss: 0.386746, Val Loss: 0.386531 +2025-07-04 13:55:42,513 - INFO - New best model saved with Val Loss: 0.386531 +2025-07-04 13:55:44,769 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:44,783 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:44,784 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:44,784 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:44,784 - INFO - After Normalization*************************************** +2025-07-04 13:55:44,784 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:44,789 - INFO - outputs: cuda:0 +2025-07-04 13:55:45,093 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:45,093 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:45,093 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:45,093 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:45,094 - INFO - After Normalization*************************************** +2025-07-04 13:55:45,094 - INFO - data and 
targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:45,098 - INFO - outputs: cuda:0 +2025-07-04 13:55:45,382 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:45,382 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:45,382 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:45,383 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:45,384 - INFO - After Normalization*************************************** +2025-07-04 13:55:45,384 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:45,388 - INFO - outputs: cuda:0 +2025-07-04 13:55:48,590 - INFO - Epoch 42/150 - Train Loss: 0.386503, Val Loss: 0.384749 +2025-07-04 13:55:48,605 - INFO - New best model saved with Val Loss: 0.384749 +2025-07-04 13:55:50,845 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:50,859 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:50,859 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:50,860 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:50,860 - INFO - After Normalization*************************************** +2025-07-04 13:55:50,860 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:50,865 - INFO - outputs: cuda:0 +2025-07-04 13:55:51,164 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:51,164 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:51,164 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:51,164 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:51,165 - INFO - After 
Normalization*************************************** +2025-07-04 13:55:51,165 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:51,169 - INFO - outputs: cuda:0 +2025-07-04 13:55:51,453 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:51,453 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:51,454 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:51,454 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:51,454 - INFO - After Normalization*************************************** +2025-07-04 13:55:51,454 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:51,458 - INFO - outputs: cuda:0 +2025-07-04 13:55:54,644 - INFO - Epoch 43/150 - Train Loss: 0.385221, Val Loss: 0.382403 +2025-07-04 13:55:54,657 - INFO - New best model saved with Val Loss: 0.382403 +2025-07-04 13:55:56,914 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:56,928 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:56,928 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:56,929 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:56,929 - INFO - After Normalization*************************************** +2025-07-04 13:55:56,929 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:56,933 - INFO - outputs: cuda:0 +2025-07-04 13:55:57,246 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:57,246 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:57,247 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:57,247 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 13:55:57,247 - INFO - After Normalization*************************************** +2025-07-04 13:55:57,247 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:57,251 - INFO - outputs: cuda:0 +2025-07-04 13:55:57,536 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:55:57,536 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:55:57,536 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:55:57,536 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:57,537 - INFO - After Normalization*************************************** +2025-07-04 13:55:57,537 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:55:57,542 - INFO - outputs: cuda:0 +2025-07-04 13:56:00,748 - INFO - Epoch 44/150 - Train Loss: 0.379563, Val Loss: 0.379401 +2025-07-04 13:56:00,762 - INFO - New best model saved with Val Loss: 0.379401 +2025-07-04 13:56:03,034 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:03,048 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:03,048 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:03,048 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:03,049 - INFO - After Normalization*************************************** +2025-07-04 13:56:03,049 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:03,053 - INFO - outputs: cuda:0 +2025-07-04 13:56:03,356 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:03,356 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:03,356 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 13:56:03,356 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:03,356 - INFO - After Normalization*************************************** +2025-07-04 13:56:03,356 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:03,361 - INFO - outputs: cuda:0 +2025-07-04 13:56:03,645 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:03,645 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:03,645 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:03,645 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:03,645 - INFO - After Normalization*************************************** +2025-07-04 13:56:03,645 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:03,650 - INFO - outputs: cuda:0 +2025-07-04 13:56:06,848 - INFO - Epoch 45/150 - Train Loss: 0.381152, Val Loss: 0.383046 +2025-07-04 13:56:09,119 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:09,132 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:09,133 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:09,133 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:09,133 - INFO - After Normalization*************************************** +2025-07-04 13:56:09,133 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:09,138 - INFO - outputs: cuda:0 +2025-07-04 13:56:09,435 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:09,435 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:09,435 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 13:56:09,435 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:09,435 - INFO - After Normalization*************************************** +2025-07-04 13:56:09,435 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:09,440 - INFO - outputs: cuda:0 +2025-07-04 13:56:09,724 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:09,724 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:09,724 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:09,724 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:09,724 - INFO - After Normalization*************************************** +2025-07-04 13:56:09,725 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:09,730 - INFO - outputs: cuda:0 +2025-07-04 13:56:12,933 - INFO - Epoch 46/150 - Train Loss: 0.377841, Val Loss: 0.374077 +2025-07-04 13:56:12,950 - INFO - New best model saved with Val Loss: 0.374077 +2025-07-04 13:56:15,218 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:15,232 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:15,232 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:15,232 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:15,232 - INFO - After Normalization*************************************** +2025-07-04 13:56:15,232 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:15,237 - INFO - outputs: cuda:0 +2025-07-04 13:56:15,548 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:15,548 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:15,548 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:15,548 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:15,548 - INFO - After Normalization*************************************** +2025-07-04 13:56:15,548 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:15,553 - INFO - outputs: cuda:0 +2025-07-04 13:56:15,837 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:15,837 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:15,837 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:15,837 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:15,837 - INFO - After Normalization*************************************** +2025-07-04 13:56:15,837 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:15,842 - INFO - outputs: cuda:0 +2025-07-04 13:56:19,067 - INFO - Epoch 47/150 - Train Loss: 0.376989, Val Loss: 0.374267 +2025-07-04 13:56:21,327 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:21,342 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:21,342 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:21,342 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:21,342 - INFO - After Normalization*************************************** +2025-07-04 13:56:21,342 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:21,347 - INFO - outputs: cuda:0 +2025-07-04 13:56:21,648 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:21,649 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:21,649 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:21,649 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:21,649 - INFO - After Normalization*************************************** +2025-07-04 13:56:21,649 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:21,654 - INFO - outputs: cuda:0 +2025-07-04 13:56:21,938 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:21,938 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:21,938 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:21,938 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:21,938 - INFO - After Normalization*************************************** +2025-07-04 13:56:21,938 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:21,944 - INFO - outputs: cuda:0 +2025-07-04 13:56:25,155 - INFO - Epoch 48/150 - Train Loss: 0.372625, Val Loss: 0.375827 +2025-07-04 13:56:27,419 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:27,434 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:27,434 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:27,434 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:27,434 - INFO - After Normalization*************************************** +2025-07-04 13:56:27,434 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:27,439 - INFO - outputs: cuda:0 +2025-07-04 13:56:27,734 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:27,734 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:27,735 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:27,735 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:27,735 - INFO - After Normalization*************************************** +2025-07-04 13:56:27,735 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:27,740 - INFO - outputs: cuda:0 +2025-07-04 13:56:28,023 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:28,023 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:28,024 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:28,024 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:28,024 - INFO - After Normalization*************************************** +2025-07-04 13:56:28,024 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:28,029 - INFO - outputs: cuda:0 +2025-07-04 13:56:31,223 - INFO - Epoch 49/150 - Train Loss: 0.373013, Val Loss: 0.377917 +2025-07-04 13:56:33,485 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:33,498 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:33,499 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:33,499 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:33,499 - INFO - After Normalization*************************************** +2025-07-04 13:56:33,499 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:33,504 - INFO - outputs: cuda:0 +2025-07-04 13:56:33,817 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:33,817 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:33,817 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:33,817 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:33,817 - INFO - After Normalization*************************************** +2025-07-04 13:56:33,817 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:33,822 - INFO - outputs: cuda:0 +2025-07-04 13:56:34,106 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:34,106 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:34,106 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:34,106 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:34,106 - INFO - After Normalization*************************************** +2025-07-04 13:56:34,106 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:34,111 - INFO - outputs: cuda:0 +2025-07-04 13:56:37,307 - INFO - Epoch 50/150 - Train Loss: 0.370192, Val Loss: 0.374004 +2025-07-04 13:56:37,323 - INFO - New best model saved with Val Loss: 0.374004 +2025-07-04 13:56:39,714 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:39,728 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:39,729 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:39,729 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:39,729 - INFO - After Normalization*************************************** +2025-07-04 13:56:39,729 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:39,734 - INFO - outputs: cuda:0 +2025-07-04 13:56:40,046 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 13:56:40,046 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:40,046 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:40,047 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:40,047 - INFO - After Normalization*************************************** +2025-07-04 13:56:40,047 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:40,051 - INFO - outputs: cuda:0 +2025-07-04 13:56:40,339 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:40,340 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:40,340 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:40,340 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:40,340 - INFO - After Normalization*************************************** +2025-07-04 13:56:40,340 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:40,345 - INFO - outputs: cuda:0 +2025-07-04 13:56:43,555 - INFO - Epoch 51/150 - Train Loss: 0.370575, Val Loss: 0.368872 +2025-07-04 13:56:43,569 - INFO - New best model saved with Val Loss: 0.368872 +2025-07-04 13:56:45,843 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:45,857 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:45,858 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:45,858 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:45,858 - INFO - After Normalization*************************************** +2025-07-04 13:56:45,858 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
13:56:45,863 - INFO - outputs: cuda:0 +2025-07-04 13:56:46,164 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:46,164 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:46,164 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:46,164 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:46,165 - INFO - After Normalization*************************************** +2025-07-04 13:56:46,165 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:46,169 - INFO - outputs: cuda:0 +2025-07-04 13:56:46,458 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:46,458 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:46,458 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:46,458 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:46,458 - INFO - After Normalization*************************************** +2025-07-04 13:56:46,458 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:46,463 - INFO - outputs: cuda:0 +2025-07-04 13:56:49,676 - INFO - Epoch 52/150 - Train Loss: 0.366457, Val Loss: 0.366532 +2025-07-04 13:56:49,693 - INFO - New best model saved with Val Loss: 0.366532 +2025-07-04 13:56:51,961 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:51,975 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:51,975 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:51,976 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:51,976 - INFO - After Normalization*************************************** +2025-07-04 13:56:51,976 - INFO - data and 
targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:51,980 - INFO - outputs: cuda:0 +2025-07-04 13:56:52,298 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:52,298 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:52,298 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:52,299 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:52,299 - INFO - After Normalization*************************************** +2025-07-04 13:56:52,299 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:52,303 - INFO - outputs: cuda:0 +2025-07-04 13:56:52,587 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:52,587 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:52,588 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:52,588 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:52,588 - INFO - After Normalization*************************************** +2025-07-04 13:56:52,588 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:52,592 - INFO - outputs: cuda:0 +2025-07-04 13:56:55,773 - INFO - Epoch 53/150 - Train Loss: 0.364227, Val Loss: 0.364998 +2025-07-04 13:56:55,788 - INFO - New best model saved with Val Loss: 0.364998 +2025-07-04 13:56:58,045 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:58,059 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:58,060 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:58,060 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:58,060 - INFO - After 
Normalization*************************************** +2025-07-04 13:56:58,060 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:58,065 - INFO - outputs: cuda:0 +2025-07-04 13:56:58,373 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:58,374 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:58,374 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:58,374 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:58,374 - INFO - After Normalization*************************************** +2025-07-04 13:56:58,374 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:58,379 - INFO - outputs: cuda:0 +2025-07-04 13:56:58,663 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:56:58,663 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:56:58,663 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:56:58,663 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:58,663 - INFO - After Normalization*************************************** +2025-07-04 13:56:58,663 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:56:58,668 - INFO - outputs: cuda:0 +2025-07-04 13:57:01,858 - INFO - Epoch 54/150 - Train Loss: 0.362108, Val Loss: 0.361583 +2025-07-04 13:57:01,874 - INFO - New best model saved with Val Loss: 0.361583 +2025-07-04 13:57:04,127 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:04,141 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:04,141 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:04,141 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 13:57:04,141 - INFO - After Normalization*************************************** +2025-07-04 13:57:04,141 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:04,146 - INFO - outputs: cuda:0 +2025-07-04 13:57:04,481 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:04,481 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:04,481 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:04,481 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:04,481 - INFO - After Normalization*************************************** +2025-07-04 13:57:04,481 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:04,486 - INFO - outputs: cuda:0 +2025-07-04 13:57:04,770 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:04,770 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:04,770 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:04,770 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:04,770 - INFO - After Normalization*************************************** +2025-07-04 13:57:04,770 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:04,775 - INFO - outputs: cuda:0 +2025-07-04 13:57:07,972 - INFO - Epoch 55/150 - Train Loss: 0.362925, Val Loss: 0.360435 +2025-07-04 13:57:07,988 - INFO - New best model saved with Val Loss: 0.360435 +2025-07-04 13:57:10,261 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:10,275 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:10,276 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 13:57:10,276 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:10,276 - INFO - After Normalization*************************************** +2025-07-04 13:57:10,276 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:10,281 - INFO - outputs: cuda:0 +2025-07-04 13:57:10,595 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:10,595 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:10,596 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:10,596 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:10,596 - INFO - After Normalization*************************************** +2025-07-04 13:57:10,596 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:10,600 - INFO - outputs: cuda:0 +2025-07-04 13:57:10,884 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:10,884 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:10,885 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:10,885 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:10,885 - INFO - After Normalization*************************************** +2025-07-04 13:57:10,885 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:10,889 - INFO - outputs: cuda:0 +2025-07-04 13:57:14,089 - INFO - Epoch 56/150 - Train Loss: 0.360084, Val Loss: 0.360202 +2025-07-04 13:57:14,104 - INFO - New best model saved with Val Loss: 0.360202 +2025-07-04 13:57:16,369 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:16,383 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:16,383 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:16,383 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:16,383 - INFO - After Normalization*************************************** +2025-07-04 13:57:16,383 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:16,388 - INFO - outputs: cuda:0 +2025-07-04 13:57:16,689 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:16,689 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:16,690 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:16,690 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:16,690 - INFO - After Normalization*************************************** +2025-07-04 13:57:16,690 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:16,694 - INFO - outputs: cuda:0 +2025-07-04 13:57:16,978 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:16,978 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:16,978 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:16,979 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:16,979 - INFO - After Normalization*************************************** +2025-07-04 13:57:16,979 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:16,983 - INFO - outputs: cuda:0 +2025-07-04 13:57:20,193 - INFO - Epoch 57/150 - Train Loss: 0.356597, Val Loss: 0.362886 +2025-07-04 13:57:22,449 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:22,462 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:22,463 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:22,463 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:22,463 - INFO - After Normalization*************************************** +2025-07-04 13:57:22,463 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:22,468 - INFO - outputs: cuda:0 +2025-07-04 13:57:22,764 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:22,765 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:22,765 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:22,765 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:22,765 - INFO - After Normalization*************************************** +2025-07-04 13:57:22,765 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:22,770 - INFO - outputs: cuda:0 +2025-07-04 13:57:23,053 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:23,053 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:23,054 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:23,054 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:23,054 - INFO - After Normalization*************************************** +2025-07-04 13:57:23,054 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:23,058 - INFO - outputs: cuda:0 +2025-07-04 13:57:26,270 - INFO - Epoch 58/150 - Train Loss: 0.359285, Val Loss: 0.363573 +2025-07-04 13:57:28,516 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:28,530 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:28,531 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:28,531 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:28,531 - INFO - After Normalization*************************************** +2025-07-04 13:57:28,531 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:28,536 - INFO - outputs: cuda:0 +2025-07-04 13:57:28,851 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:28,851 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:28,851 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:28,851 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:28,851 - INFO - After Normalization*************************************** +2025-07-04 13:57:28,851 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:28,856 - INFO - outputs: cuda:0 +2025-07-04 13:57:29,140 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:29,140 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:29,140 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:29,140 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:29,140 - INFO - After Normalization*************************************** +2025-07-04 13:57:29,140 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:29,145 - INFO - outputs: cuda:0 +2025-07-04 13:57:32,342 - INFO - Epoch 59/150 - Train Loss: 0.353965, Val Loss: 0.367631 +2025-07-04 13:57:34,599 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:34,612 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:34,613 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:34,613 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:34,613 - INFO - After Normalization*************************************** +2025-07-04 13:57:34,613 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:34,618 - INFO - outputs: cuda:0 +2025-07-04 13:57:34,926 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:34,927 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:34,927 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:34,927 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:34,927 - INFO - After Normalization*************************************** +2025-07-04 13:57:34,927 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:34,932 - INFO - outputs: cuda:0 +2025-07-04 13:57:35,216 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:35,216 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:35,216 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:35,216 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:35,216 - INFO - After Normalization*************************************** +2025-07-04 13:57:35,216 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:35,221 - INFO - outputs: cuda:0 +2025-07-04 13:57:38,408 - INFO - Epoch 60/150 - Train Loss: 0.355830, Val Loss: 0.363127 +2025-07-04 13:57:40,789 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:40,803 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:40,804 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:40,804 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:40,804 - INFO - After Normalization*************************************** +2025-07-04 13:57:40,804 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:40,809 - INFO - outputs: cuda:0 +2025-07-04 13:57:41,122 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:41,122 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:41,122 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:41,122 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:41,122 - INFO - After Normalization*************************************** +2025-07-04 13:57:41,122 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:41,127 - INFO - outputs: cuda:0 +2025-07-04 13:57:41,411 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:41,411 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:41,411 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:41,411 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:41,411 - INFO - After Normalization*************************************** +2025-07-04 13:57:41,411 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:41,416 - INFO - outputs: cuda:0 +2025-07-04 13:57:44,607 - INFO - Epoch 61/150 - Train Loss: 0.352915, Val Loss: 0.355417 +2025-07-04 13:57:44,622 - INFO - New best model saved with Val Loss: 0.355417 +2025-07-04 13:57:46,864 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 13:57:46,878 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:46,878 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:46,878 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:46,878 - INFO - After Normalization*************************************** +2025-07-04 13:57:46,878 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:46,883 - INFO - outputs: cuda:0 +2025-07-04 13:57:47,189 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:47,189 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:47,189 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:47,189 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:47,189 - INFO - After Normalization*************************************** +2025-07-04 13:57:47,189 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:47,194 - INFO - outputs: cuda:0 +2025-07-04 13:57:47,478 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:47,478 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:47,478 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:47,478 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:47,478 - INFO - After Normalization*************************************** +2025-07-04 13:57:47,478 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:47,483 - INFO - outputs: cuda:0 +2025-07-04 13:57:50,710 - INFO - Epoch 62/150 - Train Loss: 0.349458, Val Loss: 0.353605 +2025-07-04 13:57:50,725 - INFO - New best model 
saved with Val Loss: 0.353605 +2025-07-04 13:57:52,982 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:52,996 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:52,999 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:52,999 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:52,999 - INFO - After Normalization*************************************** +2025-07-04 13:57:52,999 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:53,004 - INFO - outputs: cuda:0 +2025-07-04 13:57:53,301 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:53,301 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:53,302 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:53,302 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:53,302 - INFO - After Normalization*************************************** +2025-07-04 13:57:53,302 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:53,306 - INFO - outputs: cuda:0 +2025-07-04 13:57:53,593 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:53,593 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:53,594 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:53,594 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:53,594 - INFO - After Normalization*************************************** +2025-07-04 13:57:53,594 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:53,598 - INFO - outputs: cuda:0 +2025-07-04 13:57:56,807 - INFO - Epoch 63/150 - Train Loss: 
0.349392, Val Loss: 0.354289 +2025-07-04 13:57:59,074 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:59,088 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:59,088 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:59,088 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:59,089 - INFO - After Normalization*************************************** +2025-07-04 13:57:59,089 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:59,093 - INFO - outputs: cuda:0 +2025-07-04 13:57:59,406 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:59,406 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:59,407 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:59,407 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:59,407 - INFO - After Normalization*************************************** +2025-07-04 13:57:59,407 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:59,412 - INFO - outputs: cuda:0 +2025-07-04 13:57:59,698 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:57:59,699 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:57:59,699 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:57:59,699 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:59,699 - INFO - After Normalization*************************************** +2025-07-04 13:57:59,699 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:57:59,704 - INFO - outputs: cuda:0 +2025-07-04 13:58:02,900 - INFO - Epoch 64/150 - Train Loss: 
0.349203, Val Loss: 0.349903 +2025-07-04 13:58:02,915 - INFO - New best model saved with Val Loss: 0.349903 +2025-07-04 13:58:05,165 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:05,179 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:05,179 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:05,179 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:05,180 - INFO - After Normalization*************************************** +2025-07-04 13:58:05,180 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:05,184 - INFO - outputs: cuda:0 +2025-07-04 13:58:05,489 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:05,489 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:05,489 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:05,489 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:05,489 - INFO - After Normalization*************************************** +2025-07-04 13:58:05,489 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:05,494 - INFO - outputs: cuda:0 +2025-07-04 13:58:05,801 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:05,802 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:05,802 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:05,802 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:05,802 - INFO - After Normalization*************************************** +2025-07-04 13:58:05,802 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:05,807 - INFO - 
outputs: cuda:0 +2025-07-04 13:58:09,013 - INFO - Epoch 65/150 - Train Loss: 0.348612, Val Loss: 0.350857 +2025-07-04 13:58:11,278 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:11,291 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:11,291 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:11,292 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:11,292 - INFO - After Normalization*************************************** +2025-07-04 13:58:11,292 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:11,296 - INFO - outputs: cuda:0 +2025-07-04 13:58:11,594 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:11,594 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:11,594 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:11,594 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:11,594 - INFO - After Normalization*************************************** +2025-07-04 13:58:11,594 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:11,599 - INFO - outputs: cuda:0 +2025-07-04 13:58:11,886 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:11,886 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:11,887 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:11,887 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:11,887 - INFO - After Normalization*************************************** +2025-07-04 13:58:11,887 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:11,891 - INFO - 
outputs: cuda:0 +2025-07-04 13:58:15,110 - INFO - Epoch 66/150 - Train Loss: 0.344767, Val Loss: 0.353102 +2025-07-04 13:58:17,386 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:17,400 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:17,400 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:17,400 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:17,400 - INFO - After Normalization*************************************** +2025-07-04 13:58:17,400 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:17,405 - INFO - outputs: cuda:0 +2025-07-04 13:58:17,718 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:17,718 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:17,718 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:17,719 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:17,719 - INFO - After Normalization*************************************** +2025-07-04 13:58:17,719 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:17,723 - INFO - outputs: cuda:0 +2025-07-04 13:58:18,007 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:18,007 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:18,008 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:18,008 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:18,008 - INFO - After Normalization*************************************** +2025-07-04 13:58:18,008 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:18,012 - INFO - 
outputs: cuda:0 +2025-07-04 13:58:21,224 - INFO - Epoch 67/150 - Train Loss: 0.343171, Val Loss: 0.346992 +2025-07-04 13:58:21,238 - INFO - New best model saved with Val Loss: 0.346992 +2025-07-04 13:58:23,500 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:23,515 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:23,515 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:23,515 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:23,515 - INFO - After Normalization*************************************** +2025-07-04 13:58:23,515 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:23,520 - INFO - outputs: cuda:0 +2025-07-04 13:58:23,824 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:23,824 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:23,824 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:23,824 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:23,824 - INFO - After Normalization*************************************** +2025-07-04 13:58:23,824 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:23,829 - INFO - outputs: cuda:0 +2025-07-04 13:58:24,113 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:24,113 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:24,113 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:24,113 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:24,113 - INFO - After Normalization*************************************** +2025-07-04 13:58:24,114 - INFO - data and targets:(device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 13:58:24,118 - INFO - outputs: cuda:0 +2025-07-04 13:58:27,326 - INFO - Epoch 68/150 - Train Loss: 0.343175, Val Loss: 0.344801 +2025-07-04 13:58:27,341 - INFO - New best model saved with Val Loss: 0.344801 +2025-07-04 13:58:29,590 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:29,605 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:29,605 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:29,605 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:29,605 - INFO - After Normalization*************************************** +2025-07-04 13:58:29,605 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:29,610 - INFO - outputs: cuda:0 +2025-07-04 13:58:29,919 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:29,920 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:29,920 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:29,920 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:29,920 - INFO - After Normalization*************************************** +2025-07-04 13:58:29,920 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:29,925 - INFO - outputs: cuda:0 +2025-07-04 13:58:30,208 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:30,209 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:30,209 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:30,209 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:30,209 - INFO - After Normalization*************************************** 
+2025-07-04 13:58:30,209 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:30,214 - INFO - outputs: cuda:0 +2025-07-04 13:58:33,437 - INFO - Epoch 69/150 - Train Loss: 0.342111, Val Loss: 0.345257 +2025-07-04 13:58:35,691 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:35,706 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:35,706 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:35,706 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:35,706 - INFO - After Normalization*************************************** +2025-07-04 13:58:35,706 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:35,711 - INFO - outputs: cuda:0 +2025-07-04 13:58:36,021 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:36,021 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:36,021 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:36,021 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:36,022 - INFO - After Normalization*************************************** +2025-07-04 13:58:36,022 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:36,026 - INFO - outputs: cuda:0 +2025-07-04 13:58:36,310 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:36,310 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:36,310 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:36,310 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:36,311 - INFO - After Normalization*************************************** 
+2025-07-04 13:58:36,311 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:36,315 - INFO - outputs: cuda:0 +2025-07-04 13:58:39,534 - INFO - Epoch 70/150 - Train Loss: 0.338463, Val Loss: 0.343932 +2025-07-04 13:58:39,549 - INFO - New best model saved with Val Loss: 0.343932 +2025-07-04 13:58:41,932 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:41,956 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:41,956 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:41,956 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:41,957 - INFO - After Normalization*************************************** +2025-07-04 13:58:41,957 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:41,961 - INFO - outputs: cuda:0 +2025-07-04 13:58:42,265 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:42,265 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:42,265 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:42,265 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:42,265 - INFO - After Normalization*************************************** +2025-07-04 13:58:42,265 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:42,270 - INFO - outputs: cuda:0 +2025-07-04 13:58:42,554 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:42,554 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:42,554 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:42,554 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
13:58:42,554 - INFO - After Normalization*************************************** +2025-07-04 13:58:42,554 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:42,559 - INFO - outputs: cuda:0 +2025-07-04 13:58:45,754 - INFO - Epoch 71/150 - Train Loss: 0.337893, Val Loss: 0.343028 +2025-07-04 13:58:45,769 - INFO - New best model saved with Val Loss: 0.343028 +2025-07-04 13:58:48,043 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:48,058 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:48,058 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:48,058 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:48,058 - INFO - After Normalization*************************************** +2025-07-04 13:58:48,058 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:48,063 - INFO - outputs: cuda:0 +2025-07-04 13:58:48,360 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:48,360 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:48,361 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:48,361 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:48,361 - INFO - After Normalization*************************************** +2025-07-04 13:58:48,361 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:48,366 - INFO - outputs: cuda:0 +2025-07-04 13:58:48,649 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:48,649 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:48,650 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:48,650 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:48,650 - INFO - After Normalization*************************************** +2025-07-04 13:58:48,650 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:48,654 - INFO - outputs: cuda:0 +2025-07-04 13:58:51,855 - INFO - Epoch 72/150 - Train Loss: 0.335130, Val Loss: 0.342844 +2025-07-04 13:58:51,869 - INFO - New best model saved with Val Loss: 0.342844 +2025-07-04 13:58:54,124 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:54,138 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:54,139 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:54,139 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:54,139 - INFO - After Normalization*************************************** +2025-07-04 13:58:54,139 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:54,144 - INFO - outputs: cuda:0 +2025-07-04 13:58:54,456 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:54,456 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:54,456 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:58:54,456 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:54,457 - INFO - After Normalization*************************************** +2025-07-04 13:58:54,457 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:54,461 - INFO - outputs: cuda:0 +2025-07-04 13:58:54,745 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:58:54,745 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:58:54,746 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 13:58:54,746 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:54,746 - INFO - After Normalization*************************************** +2025-07-04 13:58:54,746 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:58:54,750 - INFO - outputs: cuda:0 +2025-07-04 13:58:57,968 - INFO - Epoch 73/150 - Train Loss: 0.336449, Val Loss: 0.340494 +2025-07-04 13:58:57,983 - INFO - New best model saved with Val Loss: 0.340494 +2025-07-04 13:59:00,238 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:59:00,252 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:00,252 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:00,253 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:00,253 - INFO - After Normalization*************************************** +2025-07-04 13:59:00,253 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:00,257 - INFO - outputs: cuda:0 +2025-07-04 13:59:00,560 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:59:00,561 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:00,561 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:00,561 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:00,561 - INFO - After Normalization*************************************** +2025-07-04 13:59:00,561 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:00,566 - INFO - outputs: cuda:0 +2025-07-04 13:59:00,850 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:59:00,850 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:00,850 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:00,850 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:00,850 - INFO - After Normalization*************************************** +2025-07-04 13:59:00,850 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:00,855 - INFO - outputs: cuda:0 +2025-07-04 13:59:04,085 - INFO - Epoch 74/150 - Train Loss: 0.332509, Val Loss: 0.336325 +2025-07-04 13:59:04,099 - INFO - New best model saved with Val Loss: 0.336325 +2025-07-04 13:59:06,360 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:59:06,374 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:06,374 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:06,374 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:06,375 - INFO - After Normalization*************************************** +2025-07-04 13:59:06,375 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:06,379 - INFO - outputs: cuda:0 +2025-07-04 13:59:06,694 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:59:06,694 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:06,695 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:06,695 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:06,695 - INFO - After Normalization*************************************** +2025-07-04 13:59:06,695 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:06,699 - INFO - outputs: cuda:0 +2025-07-04 13:59:06,988 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 13:59:06,988 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:06,988 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:06,988 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:06,988 - INFO - After Normalization*************************************** +2025-07-04 13:59:06,988 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:06,993 - INFO - outputs: cuda:0 +2025-07-04 13:59:10,195 - INFO - Epoch 75/150 - Train Loss: 0.332463, Val Loss: 0.338520 +2025-07-04 13:59:12,434 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:59:12,434 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:12,435 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:12,435 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:12,435 - INFO - After Normalization*************************************** +2025-07-04 13:59:12,435 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:12,440 - INFO - outputs: cuda:0 +2025-07-04 13:59:12,750 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:59:12,750 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:12,750 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:12,750 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:12,750 - INFO - After Normalization*************************************** +2025-07-04 13:59:12,750 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:12,755 - INFO - outputs: cuda:0 +2025-07-04 13:59:13,039 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 13:59:13,039 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:13,039 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:13,039 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:13,039 - INFO - After Normalization*************************************** +2025-07-04 13:59:13,039 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:13,044 - INFO - outputs: cuda:0 +2025-07-04 13:59:16,244 - INFO - Epoch 76/150 - Train Loss: 0.330725, Val Loss: 0.343105 +2025-07-04 13:59:18,525 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:59:18,525 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:18,526 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:18,526 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:18,526 - INFO - After Normalization*************************************** +2025-07-04 13:59:18,526 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:18,531 - INFO - outputs: cuda:0 +2025-07-04 13:59:18,833 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:59:18,833 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:18,834 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:18,834 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:18,834 - INFO - After Normalization*************************************** +2025-07-04 13:59:18,834 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:18,839 - INFO - outputs: cuda:0 +2025-07-04 13:59:19,123 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 13:59:19,123 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:19,123 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:19,123 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:19,123 - INFO - After Normalization*************************************** +2025-07-04 13:59:19,123 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:19,128 - INFO - outputs: cuda:0 +2025-07-04 13:59:22,359 - INFO - Epoch 77/150 - Train Loss: 0.329678, Val Loss: 0.336431 +2025-07-04 13:59:24,617 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:59:24,617 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:24,618 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:24,618 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:24,618 - INFO - After Normalization*************************************** +2025-07-04 13:59:24,618 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:24,623 - INFO - outputs: cuda:0 +2025-07-04 13:59:24,919 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:59:24,919 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:24,919 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:24,920 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:24,920 - INFO - After Normalization*************************************** +2025-07-04 13:59:24,920 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:24,924 - INFO - outputs: cuda:0 +2025-07-04 13:59:25,208 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 13:59:25,208 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:25,209 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:25,209 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:25,209 - INFO - After Normalization*************************************** +2025-07-04 13:59:25,209 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:25,213 - INFO - outputs: cuda:0 +2025-07-04 13:59:28,395 - INFO - Epoch 78/150 - Train Loss: 0.332115, Val Loss: 0.340799 +2025-07-04 13:59:30,633 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:59:30,633 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:30,634 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:30,634 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:30,634 - INFO - After Normalization*************************************** +2025-07-04 13:59:30,634 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:30,639 - INFO - outputs: cuda:0 +2025-07-04 13:59:30,936 - INFO - before .to(local_rank)*************************************** +2025-07-04 13:59:30,936 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:30,936 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:30,936 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:30,937 - INFO - After Normalization*************************************** +2025-07-04 13:59:30,937 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:30,941 - INFO - outputs: cuda:0 +2025-07-04 13:59:31,225 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 13:59:31,225 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 13:59:31,226 - INFO - After .to(local_rank)*************************************** +2025-07-04 13:59:31,226 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:31,226 - INFO - After Normalization*************************************** +2025-07-04 13:59:31,226 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 13:59:31,230 - INFO - outputs: cuda:0 +2025-07-04 13:59:34,424 - INFO - Epoch 79/150 - Train Loss: 0.323830, Val Loss: 0.352417 +2025-07-04 13:59:52,745 - INFO - args.exp_name : Train_Test +2025-07-04 13:59:52,750 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 13:59:52,750 - INFO - Starting training with 1 GPUs +2025-07-04 13:59:57,016 - INFO - Total trainable parameters: 1437705 +2025-07-04 13:59:57,039 - INFO - Data loaded: 3 training batches, 1 validation batches, 1 test batches +2025-07-04 13:59:57,042 - INFO - Staring training for 150 epochs +2025-07-04 14:00:01,303 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:01,307 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:01,307 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:01,308 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:01,317 - INFO - After Normalization*************************************** +2025-07-04 14:00:01,318 - INFO - data and 
targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:01,801 - INFO - outputs: cuda:0 +2025-07-04 14:00:02,096 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:02,096 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:02,096 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:02,096 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:02,096 - INFO - After Normalization*************************************** +2025-07-04 14:00:02,096 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:02,102 - INFO - outputs: cuda:0 +2025-07-04 14:00:02,385 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:02,385 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:02,386 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:02,386 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:02,386 - INFO - After Normalization*************************************** +2025-07-04 14:00:02,386 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:02,390 - INFO - outputs: cuda:0 +2025-07-04 14:00:05,810 - INFO - Epoch 1/150 - Train Loss: 1.283437, Val Loss: 1.146866 +2025-07-04 14:00:05,829 - INFO - New best model saved with Val Loss: 1.146866 +2025-07-04 14:00:08,078 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:08,093 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:08,093 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:08,094 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:08,094 - INFO - After 
Normalization*************************************** +2025-07-04 14:00:08,094 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:08,098 - INFO - outputs: cuda:0 +2025-07-04 14:00:08,401 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:08,401 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:08,402 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:08,402 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:08,402 - INFO - After Normalization*************************************** +2025-07-04 14:00:08,402 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:08,407 - INFO - outputs: cuda:0 +2025-07-04 14:00:08,690 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:08,690 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:08,691 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:08,691 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:08,691 - INFO - After Normalization*************************************** +2025-07-04 14:00:08,691 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:08,695 - INFO - outputs: cuda:0 +2025-07-04 14:00:11,876 - INFO - Epoch 2/150 - Train Loss: 1.159811, Val Loss: 1.148012 +2025-07-04 14:00:14,148 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:14,163 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:14,163 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:14,163 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:14,163 - INFO - After 
Normalization*************************************** +2025-07-04 14:00:14,163 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:14,168 - INFO - outputs: cuda:0 +2025-07-04 14:00:14,466 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:14,466 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:14,467 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:14,467 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:14,467 - INFO - After Normalization*************************************** +2025-07-04 14:00:14,467 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:14,472 - INFO - outputs: cuda:0 +2025-07-04 14:00:14,755 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:14,755 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:14,756 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:14,756 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:14,756 - INFO - After Normalization*************************************** +2025-07-04 14:00:14,756 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:14,760 - INFO - outputs: cuda:0 +2025-07-04 14:00:17,941 - INFO - Epoch 3/150 - Train Loss: 1.015355, Val Loss: 1.148288 +2025-07-04 14:00:20,196 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:20,210 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:20,210 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:20,211 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:20,211 - INFO - After 
Normalization*************************************** +2025-07-04 14:00:20,211 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:20,216 - INFO - outputs: cuda:0 +2025-07-04 14:00:20,513 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:20,513 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:20,513 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:20,513 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:20,514 - INFO - After Normalization*************************************** +2025-07-04 14:00:20,514 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:20,518 - INFO - outputs: cuda:0 +2025-07-04 14:00:20,802 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:20,802 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:20,802 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:20,802 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:20,802 - INFO - After Normalization*************************************** +2025-07-04 14:00:20,802 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:20,807 - INFO - outputs: cuda:0 +2025-07-04 14:00:24,021 - INFO - Epoch 4/150 - Train Loss: 0.916734, Val Loss: 1.249089 +2025-07-04 14:00:26,289 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:26,304 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:26,304 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:26,304 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:26,304 - INFO - After 
Normalization*************************************** +2025-07-04 14:00:26,304 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:26,309 - INFO - outputs: cuda:0 +2025-07-04 14:00:26,621 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:26,622 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:26,622 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:26,622 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:26,622 - INFO - After Normalization*************************************** +2025-07-04 14:00:26,622 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:26,627 - INFO - outputs: cuda:0 +2025-07-04 14:00:26,910 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:26,910 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:26,911 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:26,911 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:26,911 - INFO - After Normalization*************************************** +2025-07-04 14:00:26,911 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:26,915 - INFO - outputs: cuda:0 +2025-07-04 14:00:30,083 - INFO - Epoch 5/150 - Train Loss: 0.839702, Val Loss: 1.442995 +2025-07-04 14:00:32,322 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:32,336 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:32,336 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:32,336 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:32,337 - INFO - After 
Normalization*************************************** +2025-07-04 14:00:32,337 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:32,342 - INFO - outputs: cuda:0 +2025-07-04 14:00:32,654 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:32,654 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:32,654 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:32,654 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:32,654 - INFO - After Normalization*************************************** +2025-07-04 14:00:32,655 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:32,659 - INFO - outputs: cuda:0 +2025-07-04 14:00:32,943 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:32,943 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:32,943 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:32,943 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:32,943 - INFO - After Normalization*************************************** +2025-07-04 14:00:32,943 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:32,948 - INFO - outputs: cuda:0 +2025-07-04 14:00:36,139 - INFO - Epoch 6/150 - Train Loss: 0.757819, Val Loss: 1.361055 +2025-07-04 14:00:38,393 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:38,406 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:38,406 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:38,406 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:38,407 - INFO - After 
Normalization*************************************** +2025-07-04 14:00:38,407 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:38,411 - INFO - outputs: cuda:0 +2025-07-04 14:00:38,718 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:38,718 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:38,718 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:38,718 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:38,718 - INFO - After Normalization*************************************** +2025-07-04 14:00:38,718 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:38,723 - INFO - outputs: cuda:0 +2025-07-04 14:00:39,006 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:39,007 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:39,007 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:39,007 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:39,007 - INFO - After Normalization*************************************** +2025-07-04 14:00:39,007 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:39,011 - INFO - outputs: cuda:0 +2025-07-04 14:00:42,216 - INFO - Epoch 7/150 - Train Loss: 0.661384, Val Loss: 1.379884 +2025-07-04 14:00:44,464 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:44,478 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:44,478 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:44,478 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:44,478 - INFO - After 
Normalization*************************************** +2025-07-04 14:00:44,478 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:44,484 - INFO - outputs: cuda:0 +2025-07-04 14:00:44,787 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:44,787 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:44,787 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:44,787 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:44,787 - INFO - After Normalization*************************************** +2025-07-04 14:00:44,787 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:44,792 - INFO - outputs: cuda:0 +2025-07-04 14:00:45,076 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:45,076 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:45,076 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:45,076 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:45,076 - INFO - After Normalization*************************************** +2025-07-04 14:00:45,076 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:45,081 - INFO - outputs: cuda:0 +2025-07-04 14:00:48,259 - INFO - Epoch 8/150 - Train Loss: 0.608690, Val Loss: 1.595568 +2025-07-04 14:00:50,499 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:50,513 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:50,513 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:50,513 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:50,513 - INFO - After 
Normalization*************************************** +2025-07-04 14:00:50,514 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:50,518 - INFO - outputs: cuda:0 +2025-07-04 14:00:50,820 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:50,820 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:50,820 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:50,820 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:50,820 - INFO - After Normalization*************************************** +2025-07-04 14:00:50,820 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:50,825 - INFO - outputs: cuda:0 +2025-07-04 14:00:51,109 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:51,109 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:51,109 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:51,109 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:51,109 - INFO - After Normalization*************************************** +2025-07-04 14:00:51,109 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:51,114 - INFO - outputs: cuda:0 +2025-07-04 14:00:54,319 - INFO - Epoch 9/150 - Train Loss: 0.560139, Val Loss: 1.838627 +2025-07-04 14:00:56,580 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:56,593 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:56,593 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:56,594 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:56,594 - INFO - After 
Normalization*************************************** +2025-07-04 14:00:56,594 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:56,598 - INFO - outputs: cuda:0 +2025-07-04 14:00:56,895 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:56,896 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:56,897 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:56,897 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:56,897 - INFO - After Normalization*************************************** +2025-07-04 14:00:56,897 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:56,902 - INFO - outputs: cuda:0 +2025-07-04 14:00:57,185 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:00:57,185 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:00:57,185 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:00:57,186 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:57,186 - INFO - After Normalization*************************************** +2025-07-04 14:00:57,186 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:00:57,190 - INFO - outputs: cuda:0 +2025-07-04 14:01:00,344 - INFO - Epoch 10/150 - Train Loss: 0.526667, Val Loss: 1.521763 +2025-07-04 14:01:02,770 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:02,783 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:02,783 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:02,783 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:02,784 - INFO - After 
Normalization*************************************** +2025-07-04 14:01:02,784 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:02,788 - INFO - outputs: cuda:0 +2025-07-04 14:01:03,089 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:03,089 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:03,089 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:03,089 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:03,089 - INFO - After Normalization*************************************** +2025-07-04 14:01:03,089 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:03,094 - INFO - outputs: cuda:0 +2025-07-04 14:01:03,382 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:03,382 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:03,382 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:03,382 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:03,382 - INFO - After Normalization*************************************** +2025-07-04 14:01:03,382 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:03,387 - INFO - outputs: cuda:0 +2025-07-04 14:01:06,544 - INFO - Epoch 11/150 - Train Loss: 0.494009, Val Loss: 1.437564 +2025-07-04 14:01:08,794 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:08,808 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:08,808 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:08,808 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:08,808 - INFO - After 
Normalization*************************************** +2025-07-04 14:01:08,808 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:08,813 - INFO - outputs: cuda:0 +2025-07-04 14:01:09,114 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:09,114 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:09,114 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:09,115 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:09,115 - INFO - After Normalization*************************************** +2025-07-04 14:01:09,115 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:09,120 - INFO - outputs: cuda:0 +2025-07-04 14:01:09,408 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:09,408 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:09,408 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:09,408 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:09,409 - INFO - After Normalization*************************************** +2025-07-04 14:01:09,409 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:09,413 - INFO - outputs: cuda:0 +2025-07-04 14:01:12,608 - INFO - Epoch 12/150 - Train Loss: 0.474216, Val Loss: 1.348367 +2025-07-04 14:01:14,860 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:14,874 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:14,874 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:14,874 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:14,875 - INFO - After 
Normalization*************************************** +2025-07-04 14:01:14,875 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:14,879 - INFO - outputs: cuda:0 +2025-07-04 14:01:15,177 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:15,177 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:15,177 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:15,177 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:15,177 - INFO - After Normalization*************************************** +2025-07-04 14:01:15,177 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:15,182 - INFO - outputs: cuda:0 +2025-07-04 14:01:15,469 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:15,469 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:15,469 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:15,470 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:15,470 - INFO - After Normalization*************************************** +2025-07-04 14:01:15,470 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:15,474 - INFO - outputs: cuda:0 +2025-07-04 14:01:18,677 - INFO - Epoch 13/150 - Train Loss: 0.457834, Val Loss: 1.089559 +2025-07-04 14:01:18,693 - INFO - New best model saved with Val Loss: 1.089559 +2025-07-04 14:01:20,954 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:20,967 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:20,967 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:20,967 - INFO - (device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 14:01:20,967 - INFO - After Normalization*************************************** +2025-07-04 14:01:20,967 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:20,972 - INFO - outputs: cuda:0 +2025-07-04 14:01:21,285 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:21,285 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:21,285 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:21,285 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:21,286 - INFO - After Normalization*************************************** +2025-07-04 14:01:21,286 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:21,291 - INFO - outputs: cuda:0 +2025-07-04 14:01:21,579 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:21,579 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:21,579 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:21,579 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:21,579 - INFO - After Normalization*************************************** +2025-07-04 14:01:21,580 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:21,584 - INFO - outputs: cuda:0 +2025-07-04 14:01:24,792 - INFO - Epoch 14/150 - Train Loss: 0.455360, Val Loss: 0.832426 +2025-07-04 14:01:24,808 - INFO - New best model saved with Val Loss: 0.832426 +2025-07-04 14:01:27,066 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:27,080 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:27,081 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 14:01:27,081 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:27,081 - INFO - After Normalization*************************************** +2025-07-04 14:01:27,081 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:27,085 - INFO - outputs: cuda:0 +2025-07-04 14:01:27,392 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:27,393 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:27,393 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:27,393 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:27,393 - INFO - After Normalization*************************************** +2025-07-04 14:01:27,393 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:27,398 - INFO - outputs: cuda:0 +2025-07-04 14:01:27,681 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:27,681 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:27,682 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:27,682 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:27,682 - INFO - After Normalization*************************************** +2025-07-04 14:01:27,682 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:27,686 - INFO - outputs: cuda:0 +2025-07-04 14:01:30,881 - INFO - Epoch 15/150 - Train Loss: 0.447783, Val Loss: 0.654857 +2025-07-04 14:01:30,897 - INFO - New best model saved with Val Loss: 0.654857 +2025-07-04 14:01:33,164 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:33,178 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:33,179 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:33,179 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:33,179 - INFO - After Normalization*************************************** +2025-07-04 14:01:33,179 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:33,183 - INFO - outputs: cuda:0 +2025-07-04 14:01:33,483 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:33,483 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:33,484 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:33,484 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:33,484 - INFO - After Normalization*************************************** +2025-07-04 14:01:33,485 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:33,489 - INFO - outputs: cuda:0 +2025-07-04 14:01:33,773 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:33,773 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:33,773 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:33,773 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:33,773 - INFO - After Normalization*************************************** +2025-07-04 14:01:33,773 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:33,778 - INFO - outputs: cuda:0 +2025-07-04 14:01:36,953 - INFO - Epoch 16/150 - Train Loss: 0.446389, Val Loss: 0.548675 +2025-07-04 14:01:36,967 - INFO - New best model saved with Val Loss: 0.548675 +2025-07-04 14:01:39,224 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 14:01:39,238 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:39,238 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:39,238 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:39,238 - INFO - After Normalization*************************************** +2025-07-04 14:01:39,238 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:39,243 - INFO - outputs: cuda:0 +2025-07-04 14:01:39,562 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:39,562 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:39,563 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:39,563 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:39,563 - INFO - After Normalization*************************************** +2025-07-04 14:01:39,563 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:39,567 - INFO - outputs: cuda:0 +2025-07-04 14:01:39,851 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:39,851 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:39,851 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:39,851 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:39,852 - INFO - After Normalization*************************************** +2025-07-04 14:01:39,852 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:39,856 - INFO - outputs: cuda:0 +2025-07-04 14:01:43,041 - INFO - Epoch 17/150 - Train Loss: 0.440610, Val Loss: 0.495317 +2025-07-04 14:01:43,056 - INFO - New best model 
saved with Val Loss: 0.495317 +2025-07-04 14:01:45,318 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:45,331 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:45,332 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:45,332 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:45,332 - INFO - After Normalization*************************************** +2025-07-04 14:01:45,332 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:45,337 - INFO - outputs: cuda:0 +2025-07-04 14:01:45,645 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:45,646 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:45,646 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:45,646 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:45,647 - INFO - After Normalization*************************************** +2025-07-04 14:01:45,647 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:45,651 - INFO - outputs: cuda:0 +2025-07-04 14:01:45,935 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:45,935 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:45,935 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:45,935 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:45,936 - INFO - After Normalization*************************************** +2025-07-04 14:01:45,936 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:45,940 - INFO - outputs: cuda:0 +2025-07-04 14:01:49,111 - INFO - Epoch 18/150 - Train Loss: 
0.438833, Val Loss: 0.464275 +2025-07-04 14:01:49,127 - INFO - New best model saved with Val Loss: 0.464275 +2025-07-04 14:01:51,385 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:51,398 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:51,399 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:51,399 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:51,399 - INFO - After Normalization*************************************** +2025-07-04 14:01:51,399 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:51,403 - INFO - outputs: cuda:0 +2025-07-04 14:01:51,710 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:51,710 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:51,710 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:51,710 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:51,710 - INFO - After Normalization*************************************** +2025-07-04 14:01:51,710 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:51,715 - INFO - outputs: cuda:0 +2025-07-04 14:01:51,999 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:51,999 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:51,999 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:51,999 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:51,999 - INFO - After Normalization*************************************** +2025-07-04 14:01:51,999 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:52,004 - INFO - 
outputs: cuda:0 +2025-07-04 14:01:55,192 - INFO - Epoch 19/150 - Train Loss: 0.439969, Val Loss: 0.443737 +2025-07-04 14:01:55,207 - INFO - New best model saved with Val Loss: 0.443737 +2025-07-04 14:01:57,449 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:57,462 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:57,463 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:57,463 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:57,463 - INFO - After Normalization*************************************** +2025-07-04 14:01:57,463 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:57,468 - INFO - outputs: cuda:0 +2025-07-04 14:01:57,770 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:57,770 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:57,770 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:57,771 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:57,771 - INFO - After Normalization*************************************** +2025-07-04 14:01:57,771 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:57,776 - INFO - outputs: cuda:0 +2025-07-04 14:01:58,060 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:01:58,060 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:01:58,060 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:01:58,060 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:01:58,060 - INFO - After Normalization*************************************** +2025-07-04 14:01:58,060 - INFO - data and targets:(device(type='cuda', 
index=0), device(type='cuda', index=0)) +2025-07-04 14:01:58,065 - INFO - outputs: cuda:0 +2025-07-04 14:02:01,240 - INFO - Epoch 20/150 - Train Loss: 0.431883, Val Loss: 0.428062 +2025-07-04 14:02:01,255 - INFO - New best model saved with Val Loss: 0.428062 +2025-07-04 14:02:03,624 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:03,624 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:03,624 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:03,624 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:03,625 - INFO - After Normalization*************************************** +2025-07-04 14:02:03,625 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:03,629 - INFO - outputs: cuda:0 +2025-07-04 14:02:03,935 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:03,935 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:03,935 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:03,935 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:03,935 - INFO - After Normalization*************************************** +2025-07-04 14:02:03,935 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:03,940 - INFO - outputs: cuda:0 +2025-07-04 14:02:04,223 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:04,223 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:04,224 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:04,224 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:04,224 - INFO - After Normalization*************************************** 
+2025-07-04 14:02:04,224 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:04,228 - INFO - outputs: cuda:0 +2025-07-04 14:02:07,422 - INFO - Epoch 21/150 - Train Loss: 0.431155, Val Loss: 0.423547 +2025-07-04 14:02:07,438 - INFO - New best model saved with Val Loss: 0.423547 +2025-07-04 14:02:09,708 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:09,708 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:09,709 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:09,709 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:09,709 - INFO - After Normalization*************************************** +2025-07-04 14:02:09,709 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:09,714 - INFO - outputs: cuda:0 +2025-07-04 14:02:10,014 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:10,015 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:10,015 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:10,015 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:10,016 - INFO - After Normalization*************************************** +2025-07-04 14:02:10,016 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:10,021 - INFO - outputs: cuda:0 +2025-07-04 14:02:10,304 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:10,304 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:10,305 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:10,305 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
14:02:10,305 - INFO - After Normalization*************************************** +2025-07-04 14:02:10,305 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:10,309 - INFO - outputs: cuda:0 +2025-07-04 14:02:13,508 - INFO - Epoch 22/150 - Train Loss: 0.428144, Val Loss: 0.426146 +2025-07-04 14:02:15,778 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:15,778 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:15,778 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:15,779 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:15,779 - INFO - After Normalization*************************************** +2025-07-04 14:02:15,779 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:15,783 - INFO - outputs: cuda:0 +2025-07-04 14:02:16,080 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:16,081 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:16,081 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:16,081 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:16,081 - INFO - After Normalization*************************************** +2025-07-04 14:02:16,081 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:16,086 - INFO - outputs: cuda:0 +2025-07-04 14:02:16,369 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:16,369 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:16,370 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:16,370 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
14:02:16,370 - INFO - After Normalization*************************************** +2025-07-04 14:02:16,370 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:16,374 - INFO - outputs: cuda:0 +2025-07-04 14:02:19,579 - INFO - Epoch 23/150 - Train Loss: 0.425435, Val Loss: 0.427524 +2025-07-04 14:02:21,833 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:21,833 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:21,833 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:21,833 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:21,834 - INFO - After Normalization*************************************** +2025-07-04 14:02:21,834 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:21,838 - INFO - outputs: cuda:0 +2025-07-04 14:02:22,137 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:22,137 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:22,137 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:22,137 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:22,137 - INFO - After Normalization*************************************** +2025-07-04 14:02:22,138 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:22,143 - INFO - outputs: cuda:0 +2025-07-04 14:02:22,426 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:22,426 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:22,427 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:22,427 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
14:02:22,427 - INFO - After Normalization*************************************** +2025-07-04 14:02:22,427 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:22,431 - INFO - outputs: cuda:0 +2025-07-04 14:02:25,616 - INFO - Epoch 24/150 - Train Loss: 0.422210, Val Loss: 0.422279 +2025-07-04 14:02:25,631 - INFO - New best model saved with Val Loss: 0.422279 +2025-07-04 14:02:27,871 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:27,871 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:27,872 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:27,872 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:27,872 - INFO - After Normalization*************************************** +2025-07-04 14:02:27,872 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:27,877 - INFO - outputs: cuda:0 +2025-07-04 14:02:28,187 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:28,187 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:28,187 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:28,187 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:28,187 - INFO - After Normalization*************************************** +2025-07-04 14:02:28,187 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:28,192 - INFO - outputs: cuda:0 +2025-07-04 14:02:28,475 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:28,475 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:28,475 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:28,476 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:28,476 - INFO - After Normalization*************************************** +2025-07-04 14:02:28,476 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:28,480 - INFO - outputs: cuda:0 +2025-07-04 14:02:31,663 - INFO - Epoch 25/150 - Train Loss: 0.423424, Val Loss: 0.427988 +2025-07-04 14:02:33,890 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:33,891 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:33,891 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:33,891 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:33,891 - INFO - After Normalization*************************************** +2025-07-04 14:02:33,891 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:33,896 - INFO - outputs: cuda:0 +2025-07-04 14:02:34,195 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:34,195 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:34,196 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:34,196 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:34,196 - INFO - After Normalization*************************************** +2025-07-04 14:02:34,196 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:34,201 - INFO - outputs: cuda:0 +2025-07-04 14:02:34,485 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:34,485 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:34,485 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:34,485 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:34,485 - INFO - After Normalization*************************************** +2025-07-04 14:02:34,485 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:34,490 - INFO - outputs: cuda:0 +2025-07-04 14:02:37,674 - INFO - Epoch 26/150 - Train Loss: 0.416589, Val Loss: 0.438708 +2025-07-04 14:02:39,928 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:39,928 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:39,929 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:39,929 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:39,929 - INFO - After Normalization*************************************** +2025-07-04 14:02:39,929 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:39,934 - INFO - outputs: cuda:0 +2025-07-04 14:02:40,245 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:40,245 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:40,245 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:40,245 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:40,245 - INFO - After Normalization*************************************** +2025-07-04 14:02:40,245 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:40,250 - INFO - outputs: cuda:0 +2025-07-04 14:02:40,533 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:40,534 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:40,534 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:40,534 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:40,534 - INFO - After Normalization*************************************** +2025-07-04 14:02:40,534 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:40,539 - INFO - outputs: cuda:0 +2025-07-04 14:02:43,695 - INFO - Epoch 27/150 - Train Loss: 0.415543, Val Loss: 0.429039 +2025-07-04 14:02:45,939 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:45,939 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:45,940 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:45,940 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:45,940 - INFO - After Normalization*************************************** +2025-07-04 14:02:45,940 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:45,944 - INFO - outputs: cuda:0 +2025-07-04 14:02:46,239 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:46,239 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:46,240 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:46,240 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:46,240 - INFO - After Normalization*************************************** +2025-07-04 14:02:46,240 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:46,244 - INFO - outputs: cuda:0 +2025-07-04 14:02:46,528 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:46,529 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:46,529 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:46,529 - INFO - 
(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:46,529 - INFO - After Normalization*************************************** +2025-07-04 14:02:46,529 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:46,534 - INFO - outputs: cuda:0 +2025-07-04 14:02:49,700 - INFO - Epoch 28/150 - Train Loss: 0.413719, Val Loss: 0.420002 +2025-07-04 14:02:49,716 - INFO - New best model saved with Val Loss: 0.420002 +2025-07-04 14:02:51,972 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:51,972 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:51,973 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:51,973 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:51,973 - INFO - After Normalization*************************************** +2025-07-04 14:02:51,973 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:51,978 - INFO - outputs: cuda:0 +2025-07-04 14:02:52,276 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:52,276 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:52,276 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:52,276 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:52,276 - INFO - After Normalization*************************************** +2025-07-04 14:02:52,276 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:52,281 - INFO - outputs: cuda:0 +2025-07-04 14:02:52,565 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:52,565 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:52,565 - INFO - After 
.to(local_rank)*************************************** +2025-07-04 14:02:52,565 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:52,565 - INFO - After Normalization*************************************** +2025-07-04 14:02:52,565 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:52,570 - INFO - outputs: cuda:0 +2025-07-04 14:02:55,747 - INFO - Epoch 29/150 - Train Loss: 0.410406, Val Loss: 0.413966 +2025-07-04 14:02:55,761 - INFO - New best model saved with Val Loss: 0.413966 +2025-07-04 14:02:58,022 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:58,022 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:58,023 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:58,023 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:58,023 - INFO - After Normalization*************************************** +2025-07-04 14:02:58,023 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:58,028 - INFO - outputs: cuda:0 +2025-07-04 14:02:58,342 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:58,343 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:58,343 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:58,343 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:58,343 - INFO - After Normalization*************************************** +2025-07-04 14:02:58,343 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:58,348 - INFO - outputs: cuda:0 +2025-07-04 14:02:58,631 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:02:58,632 - INFO - data and targets: 
(device(type='cpu'), device(type='cpu')) +2025-07-04 14:02:58,632 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:02:58,632 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:58,633 - INFO - After Normalization*************************************** +2025-07-04 14:02:58,633 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:02:58,637 - INFO - outputs: cuda:0 +2025-07-04 14:03:01,817 - INFO - Epoch 30/150 - Train Loss: 0.409573, Val Loss: 0.411146 +2025-07-04 14:03:01,831 - INFO - New best model saved with Val Loss: 0.411146 +2025-07-04 14:03:04,208 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:04,208 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:04,208 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:04,208 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:04,208 - INFO - After Normalization*************************************** +2025-07-04 14:03:04,208 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:04,213 - INFO - outputs: cuda:0 +2025-07-04 14:03:04,510 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:04,510 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:04,510 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:04,510 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:04,511 - INFO - After Normalization*************************************** +2025-07-04 14:03:04,511 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:04,515 - INFO - outputs: cuda:0 +2025-07-04 14:03:04,799 - INFO - before 
.to(local_rank)*************************************** +2025-07-04 14:03:04,799 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:04,799 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:04,799 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:04,799 - INFO - After Normalization*************************************** +2025-07-04 14:03:04,799 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:04,804 - INFO - outputs: cuda:0 +2025-07-04 14:03:07,996 - INFO - Epoch 31/150 - Train Loss: 0.407763, Val Loss: 0.405612 +2025-07-04 14:03:08,015 - INFO - New best model saved with Val Loss: 0.405612 +2025-07-04 14:03:10,260 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:10,260 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:10,260 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:10,260 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:10,260 - INFO - After Normalization*************************************** +2025-07-04 14:03:10,260 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:10,265 - INFO - outputs: cuda:0 +2025-07-04 14:03:10,562 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:10,562 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:10,562 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:10,562 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:10,562 - INFO - After Normalization*************************************** +2025-07-04 14:03:10,563 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
14:03:10,567 - INFO - outputs: cuda:0 +2025-07-04 14:03:10,851 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:10,852 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:10,852 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:10,852 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:10,852 - INFO - After Normalization*************************************** +2025-07-04 14:03:10,852 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:10,857 - INFO - outputs: cuda:0 +2025-07-04 14:03:14,040 - INFO - Epoch 32/150 - Train Loss: 0.407623, Val Loss: 0.406176 +2025-07-04 14:03:16,273 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:16,273 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:16,274 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:16,274 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:16,274 - INFO - After Normalization*************************************** +2025-07-04 14:03:16,274 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:16,279 - INFO - outputs: cuda:0 +2025-07-04 14:03:16,575 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:16,575 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:16,575 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:16,575 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:16,575 - INFO - After Normalization*************************************** +2025-07-04 14:03:16,576 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
14:03:16,580 - INFO - outputs: cuda:0 +2025-07-04 14:03:16,864 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:16,864 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:16,864 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:16,864 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:16,864 - INFO - After Normalization*************************************** +2025-07-04 14:03:16,864 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:16,869 - INFO - outputs: cuda:0 +2025-07-04 14:03:20,036 - INFO - Epoch 33/150 - Train Loss: 0.403604, Val Loss: 0.407962 +2025-07-04 14:03:22,279 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:22,279 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:22,280 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:22,280 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:22,280 - INFO - After Normalization*************************************** +2025-07-04 14:03:22,280 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:22,285 - INFO - outputs: cuda:0 +2025-07-04 14:03:22,583 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:22,583 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:22,583 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:22,583 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:22,583 - INFO - After Normalization*************************************** +2025-07-04 14:03:22,583 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 
14:03:22,588 - INFO - outputs: cuda:0 +2025-07-04 14:03:22,871 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:22,872 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:22,872 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:22,873 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:22,873 - INFO - After Normalization*************************************** +2025-07-04 14:03:22,873 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:22,877 - INFO - outputs: cuda:0 +2025-07-04 14:03:26,046 - INFO - Epoch 34/150 - Train Loss: 0.399802, Val Loss: 0.401497 +2025-07-04 14:03:26,062 - INFO - New best model saved with Val Loss: 0.401497 +2025-07-04 14:03:28,313 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:28,313 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:28,314 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:28,314 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:28,314 - INFO - After Normalization*************************************** +2025-07-04 14:03:28,314 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:28,319 - INFO - outputs: cuda:0 +2025-07-04 14:03:28,616 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:28,616 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:28,616 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:28,616 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:28,616 - INFO - After Normalization*************************************** +2025-07-04 14:03:28,616 - INFO - data and 
targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:28,621 - INFO - outputs: cuda:0 +2025-07-04 14:03:28,905 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:28,905 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:28,905 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:28,905 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:28,905 - INFO - After Normalization*************************************** +2025-07-04 14:03:28,905 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:28,910 - INFO - outputs: cuda:0 +2025-07-04 14:03:32,089 - INFO - Epoch 35/150 - Train Loss: 0.400421, Val Loss: 0.400646 +2025-07-04 14:03:32,104 - INFO - New best model saved with Val Loss: 0.400646 +2025-07-04 14:03:34,376 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:34,376 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:34,377 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:34,377 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:34,377 - INFO - After Normalization*************************************** +2025-07-04 14:03:34,377 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:34,382 - INFO - outputs: cuda:0 +2025-07-04 14:03:34,683 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:34,683 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:34,683 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:34,684 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:34,684 - INFO - After 
Normalization*************************************** +2025-07-04 14:03:34,684 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:34,688 - INFO - outputs: cuda:0 +2025-07-04 14:03:34,972 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:34,972 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:34,972 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:34,973 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:34,973 - INFO - After Normalization*************************************** +2025-07-04 14:03:34,973 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:34,978 - INFO - outputs: cuda:0 +2025-07-04 14:03:38,155 - INFO - Epoch 36/150 - Train Loss: 0.395775, Val Loss: 0.403946 +2025-07-04 14:03:40,392 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:40,393 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:40,393 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:40,393 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:40,393 - INFO - After Normalization*************************************** +2025-07-04 14:03:40,393 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:40,398 - INFO - outputs: cuda:0 +2025-07-04 14:03:40,693 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:40,694 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:40,694 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:40,694 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:40,694 - INFO - After 
Normalization*************************************** +2025-07-04 14:03:40,694 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:40,699 - INFO - outputs: cuda:0 +2025-07-04 14:03:40,982 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:40,982 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:40,983 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:40,983 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:40,983 - INFO - After Normalization*************************************** +2025-07-04 14:03:40,983 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:40,987 - INFO - outputs: cuda:0 +2025-07-04 14:03:44,180 - INFO - Epoch 37/150 - Train Loss: 0.395949, Val Loss: 0.401222 +2025-07-04 14:03:46,426 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:46,426 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:46,426 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:46,426 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:46,426 - INFO - After Normalization*************************************** +2025-07-04 14:03:46,427 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:46,431 - INFO - outputs: cuda:0 +2025-07-04 14:03:46,728 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:46,728 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:46,728 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:46,728 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:46,729 - INFO - After 
Normalization*************************************** +2025-07-04 14:03:46,729 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:46,733 - INFO - outputs: cuda:0 +2025-07-04 14:03:47,017 - INFO - before .to(local_rank)*************************************** +2025-07-04 14:03:47,017 - INFO - data and targets: (device(type='cpu'), device(type='cpu')) +2025-07-04 14:03:47,017 - INFO - After .to(local_rank)*************************************** +2025-07-04 14:03:47,017 - INFO - (device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:47,017 - INFO - After Normalization*************************************** +2025-07-04 14:03:47,018 - INFO - data and targets:(device(type='cuda', index=0), device(type='cuda', index=0)) +2025-07-04 14:03:47,022 - INFO - outputs: cuda:0 +2025-07-04 14:03:50,195 - INFO - Epoch 38/150 - Train Loss: 0.393724, Val Loss: 0.398120 +2025-07-04 14:03:50,210 - INFO - New best model saved with Val Loss: 0.398120 +2025-07-04 14:04:10,422 - INFO - args.exp_name : Train_Test +2025-07-04 14:04:10,428 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/Data_Pressure/Pressure_VTK', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/Data_Pressure/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 14:04:10,428 - INFO - Starting training with 1 GPUs +2025-07-04 14:04:14,610 - INFO - Total trainable parameters: 1437705 +2025-07-04 14:04:14,639 - INFO - Data loaded: 3 training batches, 1 validation batches, 1 test batches +2025-07-04 14:04:14,640 - INFO - Staring training for 150 epochs +2025-07-04 14:04:22,781 - INFO - Epoch 1/150 - Train Loss: 1.283437, Val Loss: 1.146866 +2025-07-04 14:04:22,800 - INFO - New best model saved with Val Loss: 1.146866 +2025-07-04 
14:04:28,883 - INFO - Epoch 2/150 - Train Loss: 1.159811, Val Loss: 1.148012 +2025-07-04 14:04:34,995 - INFO - Epoch 3/150 - Train Loss: 1.015355, Val Loss: 1.148288 +2025-07-04 14:04:41,081 - INFO - Epoch 4/150 - Train Loss: 0.916734, Val Loss: 1.249089 +2025-07-04 14:04:47,161 - INFO - Epoch 5/150 - Train Loss: 0.839702, Val Loss: 1.442995 +2025-07-04 14:04:53,276 - INFO - Epoch 6/150 - Train Loss: 0.757819, Val Loss: 1.361055 +2025-07-04 14:04:59,359 - INFO - Epoch 7/150 - Train Loss: 0.661384, Val Loss: 1.379884 +2025-07-04 14:05:05,449 - INFO - Epoch 8/150 - Train Loss: 0.608690, Val Loss: 1.595568 +2025-07-04 14:05:11,570 - INFO - Epoch 9/150 - Train Loss: 0.560139, Val Loss: 1.838627 +2025-07-04 14:05:17,655 - INFO - Epoch 10/150 - Train Loss: 0.526667, Val Loss: 1.521763 +2025-07-04 14:05:23,895 - INFO - Epoch 11/150 - Train Loss: 0.494009, Val Loss: 1.437564 +2025-07-04 14:05:29,973 - INFO - Epoch 12/150 - Train Loss: 0.474216, Val Loss: 1.348367 +2025-07-04 14:05:36,057 - INFO - Epoch 13/150 - Train Loss: 0.457834, Val Loss: 1.089559 +2025-07-04 14:05:36,072 - INFO - New best model saved with Val Loss: 1.089559 +2025-07-04 14:05:42,158 - INFO - Epoch 14/150 - Train Loss: 0.455360, Val Loss: 0.832426 +2025-07-04 14:05:42,172 - INFO - New best model saved with Val Loss: 0.832426 +2025-07-04 14:05:48,231 - INFO - Epoch 15/150 - Train Loss: 0.447783, Val Loss: 0.654857 +2025-07-04 14:05:48,245 - INFO - New best model saved with Val Loss: 0.654857 +2025-07-04 14:05:54,295 - INFO - Epoch 16/150 - Train Loss: 0.446389, Val Loss: 0.548675 +2025-07-04 14:05:54,309 - INFO - New best model saved with Val Loss: 0.548675 +2025-07-04 14:06:00,408 - INFO - Epoch 17/150 - Train Loss: 0.440610, Val Loss: 0.495317 +2025-07-04 14:06:00,421 - INFO - New best model saved with Val Loss: 0.495317 +2025-07-04 14:06:06,488 - INFO - Epoch 18/150 - Train Loss: 0.438833, Val Loss: 0.464275 +2025-07-04 14:06:06,503 - INFO - New best model saved with Val Loss: 0.464275 +2025-07-04 
14:06:12,558 - INFO - Epoch 19/150 - Train Loss: 0.439969, Val Loss: 0.443737 +2025-07-04 14:06:12,574 - INFO - New best model saved with Val Loss: 0.443737 +2025-07-04 14:06:18,648 - INFO - Epoch 20/150 - Train Loss: 0.431883, Val Loss: 0.428062 +2025-07-04 14:06:18,661 - INFO - New best model saved with Val Loss: 0.428062 +2025-07-04 14:06:24,819 - INFO - Epoch 21/150 - Train Loss: 0.431155, Val Loss: 0.423547 +2025-07-04 14:06:24,833 - INFO - New best model saved with Val Loss: 0.423547 +2025-07-04 14:06:30,899 - INFO - Epoch 22/150 - Train Loss: 0.428144, Val Loss: 0.426146 +2025-07-04 14:06:36,979 - INFO - Epoch 23/150 - Train Loss: 0.425435, Val Loss: 0.427524 +2025-07-04 14:06:43,061 - INFO - Epoch 24/150 - Train Loss: 0.422210, Val Loss: 0.422279 +2025-07-04 14:06:43,077 - INFO - New best model saved with Val Loss: 0.422279 +2025-07-04 14:06:49,154 - INFO - Epoch 25/150 - Train Loss: 0.423424, Val Loss: 0.427988 +2025-07-04 14:06:55,255 - INFO - Epoch 26/150 - Train Loss: 0.416589, Val Loss: 0.438708 +2025-07-04 14:07:01,340 - INFO - Epoch 27/150 - Train Loss: 0.415543, Val Loss: 0.429039 +2025-07-04 14:07:07,374 - INFO - Epoch 28/150 - Train Loss: 0.413719, Val Loss: 0.420002 +2025-07-04 14:07:07,391 - INFO - New best model saved with Val Loss: 0.420002 +2025-07-04 14:07:13,473 - INFO - Epoch 29/150 - Train Loss: 0.410406, Val Loss: 0.413966 +2025-07-04 14:07:13,486 - INFO - New best model saved with Val Loss: 0.413966 +2025-07-04 14:07:19,583 - INFO - Epoch 30/150 - Train Loss: 0.409573, Val Loss: 0.411146 +2025-07-04 14:07:19,600 - INFO - New best model saved with Val Loss: 0.411146 +2025-07-04 14:07:25,826 - INFO - Epoch 31/150 - Train Loss: 0.407763, Val Loss: 0.405612 +2025-07-04 14:07:25,841 - INFO - New best model saved with Val Loss: 0.405612 +2025-07-04 14:07:31,901 - INFO - Epoch 32/150 - Train Loss: 0.407623, Val Loss: 0.406176 +2025-07-04 14:07:37,963 - INFO - Epoch 33/150 - Train Loss: 0.403604, Val Loss: 0.407962 +2025-07-04 14:07:44,038 - 
INFO - Epoch 34/150 - Train Loss: 0.399802, Val Loss: 0.401497 +2025-07-04 14:07:44,054 - INFO - New best model saved with Val Loss: 0.401497 +2025-07-04 14:07:50,137 - INFO - Epoch 35/150 - Train Loss: 0.400421, Val Loss: 0.400646 +2025-07-04 14:07:50,151 - INFO - New best model saved with Val Loss: 0.400646 +2025-07-04 14:07:56,225 - INFO - Epoch 36/150 - Train Loss: 0.395775, Val Loss: 0.403946 +2025-07-04 14:08:02,322 - INFO - Epoch 37/150 - Train Loss: 0.395949, Val Loss: 0.401222 +2025-07-04 14:08:08,402 - INFO - Epoch 38/150 - Train Loss: 0.393724, Val Loss: 0.398120 +2025-07-04 14:08:08,418 - INFO - New best model saved with Val Loss: 0.398120 +2025-07-04 14:08:14,493 - INFO - Epoch 39/150 - Train Loss: 0.390039, Val Loss: 0.399025 +2025-07-04 14:08:20,581 - INFO - Epoch 40/150 - Train Loss: 0.391684, Val Loss: 0.390313 +2025-07-04 14:08:20,595 - INFO - New best model saved with Val Loss: 0.390313 +2025-07-04 14:08:26,806 - INFO - Epoch 41/150 - Train Loss: 0.386746, Val Loss: 0.386531 +2025-07-04 14:08:26,821 - INFO - New best model saved with Val Loss: 0.386531 +2025-07-04 14:08:32,947 - INFO - Epoch 42/150 - Train Loss: 0.386503, Val Loss: 0.384749 +2025-07-04 14:08:32,963 - INFO - New best model saved with Val Loss: 0.384749 +2025-07-04 14:08:39,053 - INFO - Epoch 43/150 - Train Loss: 0.385221, Val Loss: 0.382403 +2025-07-04 14:08:39,067 - INFO - New best model saved with Val Loss: 0.382403 +2025-07-04 14:08:45,166 - INFO - Epoch 44/150 - Train Loss: 0.379563, Val Loss: 0.379401 +2025-07-04 14:08:45,181 - INFO - New best model saved with Val Loss: 0.379401 +2025-07-04 14:08:51,239 - INFO - Epoch 45/150 - Train Loss: 0.381152, Val Loss: 0.383046 +2025-07-04 14:08:57,307 - INFO - Epoch 46/150 - Train Loss: 0.377841, Val Loss: 0.374077 +2025-07-04 14:08:57,322 - INFO - New best model saved with Val Loss: 0.374077 +2025-07-04 14:09:03,438 - INFO - Epoch 47/150 - Train Loss: 0.376989, Val Loss: 0.374267 +2025-07-04 14:09:09,535 - INFO - Epoch 48/150 - Train 
Loss: 0.372625, Val Loss: 0.375827 +2025-07-04 14:09:15,586 - INFO - Epoch 49/150 - Train Loss: 0.373013, Val Loss: 0.377917 +2025-07-04 14:09:21,631 - INFO - Epoch 50/150 - Train Loss: 0.370192, Val Loss: 0.374004 +2025-07-04 14:09:21,646 - INFO - New best model saved with Val Loss: 0.374004 +2025-07-04 14:09:27,811 - INFO - Epoch 51/150 - Train Loss: 0.370575, Val Loss: 0.368872 +2025-07-04 14:09:27,826 - INFO - New best model saved with Val Loss: 0.368872 +2025-07-04 14:09:33,870 - INFO - Epoch 52/150 - Train Loss: 0.366457, Val Loss: 0.366532 +2025-07-04 14:09:33,883 - INFO - New best model saved with Val Loss: 0.366532 +2025-07-04 14:09:39,957 - INFO - Epoch 53/150 - Train Loss: 0.364227, Val Loss: 0.364998 +2025-07-04 14:09:39,970 - INFO - New best model saved with Val Loss: 0.364998 +2025-07-04 14:09:46,019 - INFO - Epoch 54/150 - Train Loss: 0.362108, Val Loss: 0.361583 +2025-07-04 14:09:46,034 - INFO - New best model saved with Val Loss: 0.361583 +2025-07-04 14:09:52,086 - INFO - Epoch 55/150 - Train Loss: 0.362925, Val Loss: 0.360435 +2025-07-04 14:09:52,101 - INFO - New best model saved with Val Loss: 0.360435 +2025-07-04 14:09:58,156 - INFO - Epoch 56/150 - Train Loss: 0.360084, Val Loss: 0.360202 +2025-07-04 14:09:58,171 - INFO - New best model saved with Val Loss: 0.360202 +2025-07-04 14:10:04,242 - INFO - Epoch 57/150 - Train Loss: 0.356597, Val Loss: 0.362886 +2025-07-04 14:10:10,343 - INFO - Epoch 58/150 - Train Loss: 0.359285, Val Loss: 0.363573 +2025-07-04 14:10:16,464 - INFO - Epoch 59/150 - Train Loss: 0.353965, Val Loss: 0.367631 +2025-07-04 14:10:22,528 - INFO - Epoch 60/150 - Train Loss: 0.355830, Val Loss: 0.363127 +2025-07-04 14:10:28,701 - INFO - Epoch 61/150 - Train Loss: 0.352915, Val Loss: 0.355417 +2025-07-04 14:10:28,717 - INFO - New best model saved with Val Loss: 0.355417 +2025-07-04 14:10:34,787 - INFO - Epoch 62/150 - Train Loss: 0.349458, Val Loss: 0.353605 +2025-07-04 14:10:34,801 - INFO - New best model saved with Val Loss: 
0.353605 +2025-07-04 14:10:40,846 - INFO - Epoch 63/150 - Train Loss: 0.349392, Val Loss: 0.354289 +2025-07-04 14:10:46,925 - INFO - Epoch 64/150 - Train Loss: 0.349203, Val Loss: 0.349903 +2025-07-04 14:10:46,939 - INFO - New best model saved with Val Loss: 0.349903 +2025-07-04 14:10:53,028 - INFO - Epoch 65/150 - Train Loss: 0.348612, Val Loss: 0.350857 +2025-07-04 14:10:59,063 - INFO - Epoch 66/150 - Train Loss: 0.344767, Val Loss: 0.353102 +2025-07-04 14:11:05,129 - INFO - Epoch 67/150 - Train Loss: 0.343171, Val Loss: 0.346992 +2025-07-04 14:11:05,143 - INFO - New best model saved with Val Loss: 0.346992 +2025-07-04 14:11:11,214 - INFO - Epoch 68/150 - Train Loss: 0.343175, Val Loss: 0.344801 +2025-07-04 14:11:11,229 - INFO - New best model saved with Val Loss: 0.344801 +2025-07-04 14:11:17,282 - INFO - Epoch 69/150 - Train Loss: 0.342111, Val Loss: 0.345257 +2025-07-04 14:11:23,348 - INFO - Epoch 70/150 - Train Loss: 0.338463, Val Loss: 0.343932 +2025-07-04 14:11:23,363 - INFO - New best model saved with Val Loss: 0.343932 +2025-07-04 14:11:29,540 - INFO - Epoch 71/150 - Train Loss: 0.337893, Val Loss: 0.343028 +2025-07-04 14:11:29,555 - INFO - New best model saved with Val Loss: 0.343028 +2025-07-04 14:11:35,586 - INFO - Epoch 72/150 - Train Loss: 0.335130, Val Loss: 0.342844 +2025-07-04 14:11:35,601 - INFO - New best model saved with Val Loss: 0.342844 +2025-07-04 14:11:41,651 - INFO - Epoch 73/150 - Train Loss: 0.336449, Val Loss: 0.340494 +2025-07-04 14:11:41,666 - INFO - New best model saved with Val Loss: 0.340494 +2025-07-04 14:11:47,731 - INFO - Epoch 74/150 - Train Loss: 0.332509, Val Loss: 0.336325 +2025-07-04 14:11:47,747 - INFO - New best model saved with Val Loss: 0.336325 +2025-07-04 14:11:53,843 - INFO - Epoch 75/150 - Train Loss: 0.332463, Val Loss: 0.338520 +2025-07-04 14:11:59,922 - INFO - Epoch 76/150 - Train Loss: 0.330725, Val Loss: 0.343105 +2025-07-04 14:12:05,976 - INFO - Epoch 77/150 - Train Loss: 0.329678, Val Loss: 0.336431 
+2025-07-04 14:12:12,035 - INFO - Epoch 78/150 - Train Loss: 0.332115, Val Loss: 0.340799 +2025-07-04 14:12:18,095 - INFO - Epoch 79/150 - Train Loss: 0.323830, Val Loss: 0.352417 +2025-07-04 14:12:24,165 - INFO - Epoch 80/150 - Train Loss: 0.324538, Val Loss: 0.337843 +2025-07-04 14:12:30,361 - INFO - Epoch 81/150 - Train Loss: 0.323342, Val Loss: 0.337610 +2025-07-04 14:12:36,449 - INFO - Epoch 82/150 - Train Loss: 0.323520, Val Loss: 0.327724 +2025-07-04 14:12:36,464 - INFO - New best model saved with Val Loss: 0.327724 +2025-07-04 14:12:42,508 - INFO - Epoch 83/150 - Train Loss: 0.322984, Val Loss: 0.328694 +2025-07-04 14:12:48,561 - INFO - Epoch 84/150 - Train Loss: 0.320235, Val Loss: 0.327149 +2025-07-04 14:12:48,576 - INFO - New best model saved with Val Loss: 0.327149 +2025-07-04 14:12:54,645 - INFO - Epoch 85/150 - Train Loss: 0.318422, Val Loss: 0.322234 +2025-07-04 14:12:54,659 - INFO - New best model saved with Val Loss: 0.322234 +2025-07-04 14:13:00,708 - INFO - Epoch 86/150 - Train Loss: 0.316809, Val Loss: 0.323506 +2025-07-04 14:13:06,762 - INFO - Epoch 87/150 - Train Loss: 0.316298, Val Loss: 0.332799 +2025-07-04 14:13:12,832 - INFO - Epoch 88/150 - Train Loss: 0.317903, Val Loss: 0.324952 +2025-07-04 14:13:18,902 - INFO - Epoch 89/150 - Train Loss: 0.317754, Val Loss: 0.318536 +2025-07-04 14:13:18,917 - INFO - New best model saved with Val Loss: 0.318536 +2025-07-04 14:13:24,951 - INFO - Epoch 90/150 - Train Loss: 0.317134, Val Loss: 0.318901 +2025-07-04 14:13:31,110 - INFO - Epoch 91/150 - Train Loss: 0.313987, Val Loss: 0.322064 +2025-07-04 14:13:37,168 - INFO - Epoch 92/150 - Train Loss: 0.313308, Val Loss: 0.327740 +2025-07-04 14:13:43,242 - INFO - Epoch 93/150 - Train Loss: 0.312805, Val Loss: 0.322787 +2025-07-04 14:13:49,330 - INFO - Epoch 94/150 - Train Loss: 0.310842, Val Loss: 0.324050 +2025-07-04 14:13:55,382 - INFO - Epoch 95/150 - Train Loss: 0.311745, Val Loss: 0.313009 +2025-07-04 14:13:55,397 - INFO - New best model saved with Val 
Loss: 0.313009 +2025-07-04 14:14:01,452 - INFO - Epoch 96/150 - Train Loss: 0.308067, Val Loss: 0.321982 +2025-07-04 14:14:07,525 - INFO - Epoch 97/150 - Train Loss: 0.306806, Val Loss: 0.330466 +2025-07-04 14:14:13,609 - INFO - Epoch 98/150 - Train Loss: 0.304537, Val Loss: 0.335054 +2025-07-04 14:14:19,692 - INFO - Epoch 99/150 - Train Loss: 0.303622, Val Loss: 0.331282 +2025-07-04 14:14:25,748 - INFO - Epoch 100/150 - Train Loss: 0.303389, Val Loss: 0.322971 +2025-07-04 14:14:31,920 - INFO - Epoch 101/150 - Train Loss: 0.304374, Val Loss: 0.331376 +2025-07-04 14:14:37,954 - INFO - Epoch 102/150 - Train Loss: 0.301968, Val Loss: 0.325183 +2025-07-04 14:14:44,013 - INFO - Epoch 103/150 - Train Loss: 0.298230, Val Loss: 0.322976 +2025-07-04 14:14:50,092 - INFO - Epoch 104/150 - Train Loss: 0.301941, Val Loss: 0.339548 +2025-07-04 14:14:56,163 - INFO - Epoch 105/150 - Train Loss: 0.301876, Val Loss: 0.337391 +2025-07-04 14:15:02,225 - INFO - Epoch 106/150 - Train Loss: 0.301338, Val Loss: 0.324497 +2025-07-04 14:15:08,267 - INFO - Epoch 107/150 - Train Loss: 0.297671, Val Loss: 0.313158 +2025-07-04 14:15:14,333 - INFO - Epoch 108/150 - Train Loss: 0.294682, Val Loss: 0.305555 +2025-07-04 14:15:14,348 - INFO - New best model saved with Val Loss: 0.305555 +2025-07-04 14:15:20,429 - INFO - Epoch 109/150 - Train Loss: 0.297884, Val Loss: 0.303263 +2025-07-04 14:15:20,445 - INFO - New best model saved with Val Loss: 0.303263 +2025-07-04 14:15:26,528 - INFO - Epoch 110/150 - Train Loss: 0.297559, Val Loss: 0.302826 +2025-07-04 14:15:26,545 - INFO - New best model saved with Val Loss: 0.302826 +2025-07-04 14:15:32,728 - INFO - Epoch 111/150 - Train Loss: 0.294144, Val Loss: 0.302702 +2025-07-04 14:15:32,745 - INFO - New best model saved with Val Loss: 0.302702 +2025-07-04 14:15:38,800 - INFO - Epoch 112/150 - Train Loss: 0.294188, Val Loss: 0.303885 +2025-07-04 14:15:44,864 - INFO - Epoch 113/150 - Train Loss: 0.294812, Val Loss: 0.302568 +2025-07-04 14:15:44,879 - INFO - 
New best model saved with Val Loss: 0.302568 +2025-07-04 14:15:50,933 - INFO - Epoch 114/150 - Train Loss: 0.291335, Val Loss: 0.302743 +2025-07-04 14:15:57,006 - INFO - Epoch 115/150 - Train Loss: 0.294847, Val Loss: 0.301672 +2025-07-04 14:15:57,022 - INFO - New best model saved with Val Loss: 0.301672 +2025-07-04 14:16:03,104 - INFO - Epoch 116/150 - Train Loss: 0.291347, Val Loss: 0.301323 +2025-07-04 14:16:03,120 - INFO - New best model saved with Val Loss: 0.301323 +2025-07-04 14:16:09,159 - INFO - Epoch 117/150 - Train Loss: 0.297098, Val Loss: 0.301480 +2025-07-04 14:16:15,210 - INFO - Epoch 118/150 - Train Loss: 0.293385, Val Loss: 0.300978 +2025-07-04 14:16:15,225 - INFO - New best model saved with Val Loss: 0.300978 +2025-07-04 14:16:21,283 - INFO - Epoch 119/150 - Train Loss: 0.302592, Val Loss: 0.299818 +2025-07-04 14:16:21,298 - INFO - New best model saved with Val Loss: 0.299818 +2025-07-04 14:16:27,375 - INFO - Epoch 120/150 - Train Loss: 0.294459, Val Loss: 0.300232 +2025-07-04 14:16:33,542 - INFO - Epoch 121/150 - Train Loss: 0.292162, Val Loss: 0.299877 +2025-07-04 14:16:39,592 - INFO - Epoch 122/150 - Train Loss: 0.293628, Val Loss: 0.300110 +2025-07-04 14:16:45,667 - INFO - Epoch 123/150 - Train Loss: 0.290856, Val Loss: 0.300606 +2025-07-04 14:16:51,719 - INFO - Epoch 124/150 - Train Loss: 0.294557, Val Loss: 0.300868 +2025-07-04 14:16:57,769 - INFO - Epoch 125/150 - Train Loss: 0.293174, Val Loss: 0.300792 +2025-07-04 14:17:03,808 - INFO - Epoch 126/150 - Train Loss: 0.293391, Val Loss: 0.302475 +2025-07-04 14:17:09,864 - INFO - Epoch 127/150 - Train Loss: 0.290767, Val Loss: 0.301194 +2025-07-04 14:17:15,933 - INFO - Epoch 128/150 - Train Loss: 0.292764, Val Loss: 0.300578 +2025-07-04 14:17:21,995 - INFO - Epoch 129/150 - Train Loss: 0.291864, Val Loss: 0.299356 +2025-07-04 14:17:22,011 - INFO - New best model saved with Val Loss: 0.299356 +2025-07-04 14:17:28,067 - INFO - Epoch 130/150 - Train Loss: 0.293602, Val Loss: 0.299731 +2025-07-04 
14:17:34,244 - INFO - Epoch 131/150 - Train Loss: 0.288207, Val Loss: 0.299689 +2025-07-04 14:17:40,323 - INFO - Epoch 132/150 - Train Loss: 0.292099, Val Loss: 0.299264 +2025-07-04 14:17:40,339 - INFO - New best model saved with Val Loss: 0.299264 +2025-07-04 14:17:46,405 - INFO - Epoch 133/150 - Train Loss: 0.290441, Val Loss: 0.299675 +2025-07-04 14:17:52,459 - INFO - Epoch 134/150 - Train Loss: 0.289598, Val Loss: 0.298882 +2025-07-04 14:17:52,473 - INFO - New best model saved with Val Loss: 0.298882 +2025-07-04 14:17:58,549 - INFO - Epoch 135/150 - Train Loss: 0.291279, Val Loss: 0.298781 +2025-07-04 14:17:58,564 - INFO - New best model saved with Val Loss: 0.298781 +2025-07-04 14:18:04,639 - INFO - Epoch 136/150 - Train Loss: 0.291402, Val Loss: 0.298846 +2025-07-04 14:18:10,708 - INFO - Epoch 137/150 - Train Loss: 0.289009, Val Loss: 0.298318 +2025-07-04 14:18:10,724 - INFO - New best model saved with Val Loss: 0.298318 +2025-07-04 14:18:16,806 - INFO - Epoch 138/150 - Train Loss: 0.290120, Val Loss: 0.299168 +2025-07-04 14:18:22,889 - INFO - Epoch 139/150 - Train Loss: 0.290107, Val Loss: 0.298576 +2025-07-04 14:18:28,949 - INFO - Epoch 140/150 - Train Loss: 0.292133, Val Loss: 0.298386 +2025-07-04 14:18:35,130 - INFO - Epoch 141/150 - Train Loss: 0.291022, Val Loss: 0.298982 +2025-07-04 14:18:41,201 - INFO - Epoch 142/150 - Train Loss: 0.289283, Val Loss: 0.298855 +2025-07-04 14:18:47,281 - INFO - Epoch 143/150 - Train Loss: 0.289318, Val Loss: 0.298431 +2025-07-04 14:18:53,401 - INFO - Epoch 144/150 - Train Loss: 0.293004, Val Loss: 0.299563 +2025-07-04 14:18:59,493 - INFO - Epoch 145/150 - Train Loss: 0.290829, Val Loss: 0.298820 +2025-07-04 14:19:05,550 - INFO - Epoch 146/150 - Train Loss: 0.293143, Val Loss: 0.299518 +2025-07-04 14:19:11,626 - INFO - Epoch 147/150 - Train Loss: 0.291828, Val Loss: 0.298869 +2025-07-04 14:19:17,708 - INFO - Epoch 148/150 - Train Loss: 0.290583, Val Loss: 0.299215 +2025-07-04 14:19:23,805 - INFO - Epoch 149/150 - Train 
Loss: 0.288438, Val Loss: 0.298331 +2025-07-04 14:19:29,879 - INFO - Epoch 150/150 - Train Loss: 0.290288, Val Loss: 0.298573 +2025-07-04 14:19:30,027 - INFO - Final model saved to experiments/Train_Test/final_model_tmp +2025-07-04 14:19:30,027 - INFO - Testing the final model +2025-07-04 14:19:30,027 - INFO - Testing the best model +2025-07-04 14:21:40,215 - INFO - args.exp_name : Train_Test +2025-07-04 14:21:40,216 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 14:21:40,216 - INFO - Starting training with 1 GPUs +2025-07-04 14:21:44,240 - INFO - Total trainable parameters: 1437705 +2025-07-04 14:21:44,379 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-04 14:21:44,380 - INFO - Staring training for 150 epochs +2025-07-04 14:22:04,933 - INFO - Epoch 1/150 - Train Loss: 0.765392, Val Loss: 1.117532 +2025-07-04 14:22:04,952 - INFO - New best model saved with Val Loss: 1.117532 +2025-07-04 14:22:22,641 - INFO - Epoch 2/150 - Train Loss: 0.425968, Val Loss: 0.480214 +2025-07-04 14:22:22,656 - INFO - New best model saved with Val Loss: 0.480214 +2025-07-04 14:22:40,391 - INFO - Epoch 3/150 - Train Loss: 0.359811, Val Loss: 0.444877 +2025-07-04 14:22:40,405 - INFO - New best model saved with Val Loss: 0.444877 +2025-07-04 14:22:58,111 - INFO - Epoch 4/150 - Train Loss: 0.312085, Val Loss: 0.590544 +2025-07-04 14:23:15,808 - INFO - Epoch 5/150 - Train Loss: 0.284683, Val Loss: 0.833210 +2025-07-04 14:23:33,573 - INFO - Epoch 6/150 - Train Loss: 0.266364, Val Loss: 1.044873 +2025-07-04 14:23:51,297 - INFO - 
Epoch 7/150 - Train Loss: 0.244517, Val Loss: 0.458772 +2025-07-04 14:24:09,010 - INFO - Epoch 8/150 - Train Loss: 0.227804, Val Loss: 1.118219 +2025-07-04 14:24:26,707 - INFO - Epoch 9/150 - Train Loss: 0.222722, Val Loss: 0.292218 +2025-07-04 14:24:26,723 - INFO - New best model saved with Val Loss: 0.292218 +2025-07-04 14:24:44,424 - INFO - Epoch 10/150 - Train Loss: 0.213346, Val Loss: 0.264306 +2025-07-04 14:24:44,441 - INFO - New best model saved with Val Loss: 0.264306 +2025-07-04 14:25:02,335 - INFO - Epoch 11/150 - Train Loss: 0.198541, Val Loss: 1.010461 +2025-07-04 14:25:20,079 - INFO - Epoch 12/150 - Train Loss: 0.188102, Val Loss: 0.402349 +2025-07-04 14:25:37,842 - INFO - Epoch 13/150 - Train Loss: 0.190205, Val Loss: 0.351225 +2025-07-04 14:25:55,564 - INFO - Epoch 14/150 - Train Loss: 0.188799, Val Loss: 0.817092 +2025-07-04 14:26:13,282 - INFO - Epoch 15/150 - Train Loss: 0.181614, Val Loss: 0.227752 +2025-07-04 14:26:13,300 - INFO - New best model saved with Val Loss: 0.227752 +2025-07-04 14:26:31,012 - INFO - Epoch 16/150 - Train Loss: 0.177835, Val Loss: 0.312880 +2025-07-04 14:26:48,764 - INFO - Epoch 17/150 - Train Loss: 0.174542, Val Loss: 0.204217 +2025-07-04 14:26:48,780 - INFO - New best model saved with Val Loss: 0.204217 +2025-07-04 14:27:06,561 - INFO - Epoch 18/150 - Train Loss: 0.171377, Val Loss: 0.178336 +2025-07-04 14:27:06,576 - INFO - New best model saved with Val Loss: 0.178336 +2025-07-04 14:27:24,362 - INFO - Epoch 19/150 - Train Loss: 0.167564, Val Loss: 0.218340 +2025-07-04 14:27:42,141 - INFO - Epoch 20/150 - Train Loss: 0.167842, Val Loss: 0.332284 +2025-07-04 14:27:59,984 - INFO - Epoch 21/150 - Train Loss: 0.163059, Val Loss: 0.209619 +2025-07-04 14:28:17,759 - INFO - Epoch 22/150 - Train Loss: 0.160918, Val Loss: 0.248925 +2025-07-04 14:28:35,486 - INFO - Epoch 23/150 - Train Loss: 0.157175, Val Loss: 0.624631 +2025-07-04 14:28:53,297 - INFO - Epoch 24/150 - Train Loss: 0.156025, Val Loss: 0.170956 +2025-07-04 
14:28:53,314 - INFO - New best model saved with Val Loss: 0.170956 +2025-07-04 14:29:11,030 - INFO - Epoch 25/150 - Train Loss: 0.153771, Val Loss: 0.321865 +2025-07-04 14:29:28,755 - INFO - Epoch 26/150 - Train Loss: 0.148996, Val Loss: 0.149375 +2025-07-04 14:29:28,771 - INFO - New best model saved with Val Loss: 0.149375 +2025-07-04 14:29:46,533 - INFO - Epoch 27/150 - Train Loss: 0.149616, Val Loss: 0.178216 +2025-07-04 14:30:04,300 - INFO - Epoch 28/150 - Train Loss: 0.154057, Val Loss: 0.161862 +2025-07-04 14:30:22,043 - INFO - Epoch 29/150 - Train Loss: 0.146227, Val Loss: 0.175928 +2025-07-04 14:30:39,780 - INFO - Epoch 30/150 - Train Loss: 0.146135, Val Loss: 0.377274 +2025-07-04 14:30:57,611 - INFO - Epoch 31/150 - Train Loss: 0.145402, Val Loss: 0.213034 +2025-07-04 14:31:15,385 - INFO - Epoch 32/150 - Train Loss: 0.142771, Val Loss: 0.151741 +2025-07-04 14:31:33,121 - INFO - Epoch 33/150 - Train Loss: 0.145088, Val Loss: 0.146101 +2025-07-04 14:31:33,136 - INFO - New best model saved with Val Loss: 0.146101 +2025-07-04 14:31:50,905 - INFO - Epoch 34/150 - Train Loss: 0.139568, Val Loss: 0.273251 +2025-07-04 14:32:08,681 - INFO - Epoch 35/150 - Train Loss: 0.140397, Val Loss: 0.148392 +2025-07-04 14:32:26,433 - INFO - Epoch 36/150 - Train Loss: 0.136741, Val Loss: 0.150139 +2025-07-04 14:32:44,209 - INFO - Epoch 37/150 - Train Loss: 0.135322, Val Loss: 0.148857 +2025-07-04 14:33:01,974 - INFO - Epoch 38/150 - Train Loss: 0.132527, Val Loss: 0.130883 +2025-07-04 14:33:01,989 - INFO - New best model saved with Val Loss: 0.130883 +2025-07-04 14:33:19,738 - INFO - Epoch 39/150 - Train Loss: 0.135555, Val Loss: 0.149993 +2025-07-04 14:33:37,525 - INFO - Epoch 40/150 - Train Loss: 0.135385, Val Loss: 0.164116 +2025-07-04 14:33:55,391 - INFO - Epoch 41/150 - Train Loss: 0.132284, Val Loss: 0.146587 +2025-07-04 14:34:13,111 - INFO - Epoch 42/150 - Train Loss: 0.133476, Val Loss: 0.166374 +2025-07-04 14:34:30,853 - INFO - Epoch 43/150 - Train Loss: 0.132089, Val 
Loss: 0.138480 +2025-07-04 14:34:48,613 - INFO - Epoch 44/150 - Train Loss: 0.131016, Val Loss: 0.134061 +2025-07-04 14:35:06,365 - INFO - Epoch 45/150 - Train Loss: 0.131406, Val Loss: 0.128516 +2025-07-04 14:35:06,381 - INFO - New best model saved with Val Loss: 0.128516 +2025-07-04 14:35:24,088 - INFO - Epoch 46/150 - Train Loss: 0.127681, Val Loss: 0.202615 +2025-07-04 14:35:41,829 - INFO - Epoch 47/150 - Train Loss: 0.126269, Val Loss: 0.136413 +2025-07-04 14:35:59,606 - INFO - Epoch 48/150 - Train Loss: 0.128555, Val Loss: 0.211257 +2025-07-04 14:36:17,334 - INFO - Epoch 49/150 - Train Loss: 0.128324, Val Loss: 0.134678 +2025-07-04 14:36:35,082 - INFO - Epoch 50/150 - Train Loss: 0.125648, Val Loss: 0.164913 +2025-07-04 14:36:52,912 - INFO - Epoch 51/150 - Train Loss: 0.124183, Val Loss: 0.121055 +2025-07-04 14:36:52,928 - INFO - New best model saved with Val Loss: 0.121055 +2025-07-04 14:37:10,653 - INFO - Epoch 52/150 - Train Loss: 0.125603, Val Loss: 0.207307 +2025-07-04 14:37:28,422 - INFO - Epoch 53/150 - Train Loss: 0.122962, Val Loss: 0.134787 +2025-07-04 14:37:46,178 - INFO - Epoch 54/150 - Train Loss: 0.122586, Val Loss: 0.152079 +2025-07-04 14:38:03,931 - INFO - Epoch 55/150 - Train Loss: 0.122021, Val Loss: 0.123622 +2025-07-04 14:38:21,660 - INFO - Epoch 56/150 - Train Loss: 0.121857, Val Loss: 0.212121 +2025-07-04 14:38:39,435 - INFO - Epoch 57/150 - Train Loss: 0.123328, Val Loss: 0.150480 +2025-07-04 14:38:57,197 - INFO - Epoch 58/150 - Train Loss: 0.122286, Val Loss: 0.167857 +2025-07-04 14:39:14,939 - INFO - Epoch 59/150 - Train Loss: 0.121922, Val Loss: 0.140145 +2025-07-04 14:39:32,703 - INFO - Epoch 60/150 - Train Loss: 0.117993, Val Loss: 0.169449 +2025-07-04 14:39:50,612 - INFO - Epoch 61/150 - Train Loss: 0.120024, Val Loss: 0.118396 +2025-07-04 14:39:50,628 - INFO - New best model saved with Val Loss: 0.118396 +2025-07-04 14:40:08,427 - INFO - Epoch 62/150 - Train Loss: 0.120212, Val Loss: 0.193534 +2025-07-04 14:40:26,182 - INFO - 
Epoch 63/150 - Train Loss: 0.121789, Val Loss: 0.137048 +2025-07-04 14:40:43,961 - INFO - Epoch 64/150 - Train Loss: 0.119106, Val Loss: 0.115732 +2025-07-04 14:40:43,978 - INFO - New best model saved with Val Loss: 0.115732 +2025-07-04 14:41:01,737 - INFO - Epoch 65/150 - Train Loss: 0.116286, Val Loss: 0.150539 +2025-07-04 14:41:19,486 - INFO - Epoch 66/150 - Train Loss: 0.116608, Val Loss: 0.204416 +2025-07-04 14:41:37,251 - INFO - Epoch 67/150 - Train Loss: 0.116302, Val Loss: 0.160922 +2025-07-04 14:41:54,988 - INFO - Epoch 68/150 - Train Loss: 0.120537, Val Loss: 0.140712 +2025-07-04 14:42:12,760 - INFO - Epoch 69/150 - Train Loss: 0.116008, Val Loss: 0.133827 +2025-07-04 14:42:30,473 - INFO - Epoch 70/150 - Train Loss: 0.117500, Val Loss: 0.125367 +2025-07-04 14:42:48,329 - INFO - Epoch 71/150 - Train Loss: 0.115497, Val Loss: 0.111867 +2025-07-04 14:42:48,346 - INFO - New best model saved with Val Loss: 0.111867 +2025-07-04 14:43:06,082 - INFO - Epoch 72/150 - Train Loss: 0.113848, Val Loss: 0.149078 +2025-07-04 14:43:23,840 - INFO - Epoch 73/150 - Train Loss: 0.115463, Val Loss: 0.112712 +2025-07-04 14:43:41,626 - INFO - Epoch 74/150 - Train Loss: 0.112697, Val Loss: 0.117975 +2025-07-04 14:43:59,360 - INFO - Epoch 75/150 - Train Loss: 0.116695, Val Loss: 0.156708 +2025-07-04 14:44:17,131 - INFO - Epoch 76/150 - Train Loss: 0.115100, Val Loss: 0.130494 +2025-07-04 14:44:34,889 - INFO - Epoch 77/150 - Train Loss: 0.112540, Val Loss: 0.115179 +2025-07-04 14:44:52,656 - INFO - Epoch 78/150 - Train Loss: 0.111732, Val Loss: 0.116926 +2025-07-04 14:45:10,370 - INFO - Epoch 79/150 - Train Loss: 0.110727, Val Loss: 0.126542 +2025-07-04 14:45:28,138 - INFO - Epoch 80/150 - Train Loss: 0.111588, Val Loss: 0.117037 +2025-07-04 14:45:46,031 - INFO - Epoch 81/150 - Train Loss: 0.111193, Val Loss: 0.166504 +2025-07-04 14:46:03,767 - INFO - Epoch 82/150 - Train Loss: 0.111560, Val Loss: 0.212419 +2025-07-04 14:46:21,538 - INFO - Epoch 83/150 - Train Loss: 0.103109, Val 
Loss: 0.092613 +2025-07-04 14:46:21,553 - INFO - New best model saved with Val Loss: 0.092613 +2025-07-04 14:46:39,289 - INFO - Epoch 84/150 - Train Loss: 0.098936, Val Loss: 0.090915 +2025-07-04 14:46:39,303 - INFO - New best model saved with Val Loss: 0.090915 +2025-07-04 14:46:57,077 - INFO - Epoch 85/150 - Train Loss: 0.097921, Val Loss: 0.090856 +2025-07-04 14:46:57,092 - INFO - New best model saved with Val Loss: 0.090856 +2025-07-04 14:47:14,839 - INFO - Epoch 86/150 - Train Loss: 0.097712, Val Loss: 0.091261 +2025-07-04 14:47:32,627 - INFO - Epoch 87/150 - Train Loss: 0.097985, Val Loss: 0.089322 +2025-07-04 14:47:32,643 - INFO - New best model saved with Val Loss: 0.089322 +2025-07-04 14:47:50,408 - INFO - Epoch 88/150 - Train Loss: 0.097777, Val Loss: 0.090429 +2025-07-04 14:48:08,166 - INFO - Epoch 89/150 - Train Loss: 0.097160, Val Loss: 0.090603 +2025-07-04 14:48:25,943 - INFO - Epoch 90/150 - Train Loss: 0.097306, Val Loss: 0.090642 +2025-07-04 14:48:43,783 - INFO - Epoch 91/150 - Train Loss: 0.096543, Val Loss: 0.092955 +2025-07-04 14:49:01,511 - INFO - Epoch 92/150 - Train Loss: 0.097235, Val Loss: 0.090075 +2025-07-04 14:49:19,220 - INFO - Epoch 93/150 - Train Loss: 0.096742, Val Loss: 0.090382 +2025-07-04 14:49:36,970 - INFO - Epoch 94/150 - Train Loss: 0.095817, Val Loss: 0.090097 +2025-07-04 14:49:54,750 - INFO - Epoch 95/150 - Train Loss: 0.096246, Val Loss: 0.089551 +2025-07-04 14:50:12,486 - INFO - Epoch 96/150 - Train Loss: 0.095710, Val Loss: 0.088552 +2025-07-04 14:50:12,501 - INFO - New best model saved with Val Loss: 0.088552 +2025-07-04 14:50:30,231 - INFO - Epoch 97/150 - Train Loss: 0.096374, Val Loss: 0.089566 +2025-07-04 14:50:47,938 - INFO - Epoch 98/150 - Train Loss: 0.096752, Val Loss: 0.088469 +2025-07-04 14:50:47,953 - INFO - New best model saved with Val Loss: 0.088469 +2025-07-04 14:51:05,705 - INFO - Epoch 99/150 - Train Loss: 0.093261, Val Loss: 0.089550 +2025-07-04 14:51:23,544 - INFO - Epoch 100/150 - Train Loss: 
0.095612, Val Loss: 0.096346 +2025-07-04 14:51:45,063 - INFO - Epoch 101/150 - Train Loss: 0.096476, Val Loss: 0.128738 +2025-07-04 14:52:06,414 - INFO - Epoch 102/150 - Train Loss: 0.096039, Val Loss: 0.090330 +2025-07-04 14:52:26,856 - INFO - Epoch 103/150 - Train Loss: 0.094943, Val Loss: 0.120177 +2025-07-04 14:52:45,026 - INFO - Epoch 104/150 - Train Loss: 0.095132, Val Loss: 0.088605 +2025-07-04 14:53:02,876 - INFO - Epoch 105/150 - Train Loss: 0.095240, Val Loss: 0.088804 +2025-07-04 14:53:20,712 - INFO - Epoch 106/150 - Train Loss: 0.095847, Val Loss: 0.092196 +2025-07-04 14:53:38,570 - INFO - Epoch 107/150 - Train Loss: 0.095251, Val Loss: 0.097188 +2025-07-04 14:53:56,448 - INFO - Epoch 108/150 - Train Loss: 0.096115, Val Loss: 0.095816 +2025-07-04 14:54:14,302 - INFO - Epoch 109/150 - Train Loss: 0.095379, Val Loss: 0.088785 +2025-07-04 14:54:32,125 - INFO - Epoch 110/150 - Train Loss: 0.094124, Val Loss: 0.086502 +2025-07-04 14:54:32,146 - INFO - New best model saved with Val Loss: 0.086502 +2025-07-04 14:54:50,077 - INFO - Epoch 111/150 - Train Loss: 0.093858, Val Loss: 0.087059 +2025-07-04 14:55:07,937 - INFO - Epoch 112/150 - Train Loss: 0.094762, Val Loss: 0.086803 +2025-07-04 14:55:25,735 - INFO - Epoch 113/150 - Train Loss: 0.094220, Val Loss: 0.086305 +2025-07-04 14:55:25,750 - INFO - New best model saved with Val Loss: 0.086305 +2025-07-04 14:55:43,608 - INFO - Epoch 114/150 - Train Loss: 0.093979, Val Loss: 0.086554 +2025-07-04 14:56:01,460 - INFO - Epoch 115/150 - Train Loss: 0.093502, Val Loss: 0.086534 +2025-07-04 14:56:19,303 - INFO - Epoch 116/150 - Train Loss: 0.093854, Val Loss: 0.086689 +2025-07-04 14:56:37,142 - INFO - Epoch 117/150 - Train Loss: 0.093443, Val Loss: 0.086520 +2025-07-04 14:56:54,970 - INFO - Epoch 118/150 - Train Loss: 0.093456, Val Loss: 0.086879 +2025-07-04 14:57:12,822 - INFO - Epoch 119/150 - Train Loss: 0.093429, Val Loss: 0.086754 +2025-07-04 14:57:30,670 - INFO - Epoch 120/150 - Train Loss: 0.093038, Val Loss: 
0.086636 +2025-07-04 14:57:48,584 - INFO - Epoch 121/150 - Train Loss: 0.093188, Val Loss: 0.086384 +2025-07-04 14:58:07,979 - INFO - Epoch 122/150 - Train Loss: 0.093174, Val Loss: 0.086662 +2025-07-04 14:58:25,809 - INFO - Epoch 123/150 - Train Loss: 0.093458, Val Loss: 0.086432 +2025-07-04 14:58:43,689 - INFO - Epoch 124/150 - Train Loss: 0.093784, Val Loss: 0.086574 +2025-07-04 14:59:01,520 - INFO - Epoch 125/150 - Train Loss: 0.093038, Val Loss: 0.086059 +2025-07-04 14:59:01,535 - INFO - New best model saved with Val Loss: 0.086059 +2025-07-04 14:59:19,302 - INFO - Epoch 126/150 - Train Loss: 0.091458, Val Loss: 0.086364 +2025-07-04 14:59:37,119 - INFO - Epoch 127/150 - Train Loss: 0.092800, Val Loss: 0.086594 +2025-07-04 14:59:54,932 - INFO - Epoch 128/150 - Train Loss: 0.093453, Val Loss: 0.086473 +2025-07-04 15:00:12,737 - INFO - Epoch 129/150 - Train Loss: 0.092883, Val Loss: 0.086414 +2025-07-04 15:00:30,462 - INFO - Epoch 130/150 - Train Loss: 0.093339, Val Loss: 0.086173 +2025-07-04 15:00:48,311 - INFO - Epoch 131/150 - Train Loss: 0.093019, Val Loss: 0.086383 +2025-07-04 15:01:06,067 - INFO - Epoch 132/150 - Train Loss: 0.093502, Val Loss: 0.086444 +2025-07-04 15:01:23,816 - INFO - Epoch 133/150 - Train Loss: 0.093330, Val Loss: 0.086222 +2025-07-04 15:01:41,541 - INFO - Epoch 134/150 - Train Loss: 0.093427, Val Loss: 0.086623 +2025-07-04 15:01:59,274 - INFO - Epoch 135/150 - Train Loss: 0.093817, Val Loss: 0.086278 +2025-07-04 15:02:17,003 - INFO - Epoch 136/150 - Train Loss: 0.093156, Val Loss: 0.086409 +2025-07-04 15:02:34,755 - INFO - Epoch 137/150 - Train Loss: 0.092966, Val Loss: 0.086330 +2025-07-04 15:02:52,485 - INFO - Epoch 138/150 - Train Loss: 0.093484, Val Loss: 0.086202 +2025-07-04 15:03:10,228 - INFO - Epoch 139/150 - Train Loss: 0.092633, Val Loss: 0.086555 +2025-07-04 15:03:27,994 - INFO - Epoch 140/150 - Train Loss: 0.092953, Val Loss: 0.086549 +2025-07-04 15:03:49,838 - INFO - Epoch 141/150 - Train Loss: 0.092855, Val Loss: 0.086349 
+2025-07-04 15:04:07,975 - INFO - Epoch 142/150 - Train Loss: 0.093116, Val Loss: 0.086281 +2025-07-04 15:04:25,760 - INFO - Epoch 143/150 - Train Loss: 0.093006, Val Loss: 0.086377 +2025-07-04 15:04:43,548 - INFO - Epoch 144/150 - Train Loss: 0.092932, Val Loss: 0.086349 +2025-07-04 15:05:01,354 - INFO - Epoch 145/150 - Train Loss: 0.092390, Val Loss: 0.086420 +2025-07-04 15:05:19,137 - INFO - Epoch 146/150 - Train Loss: 0.093185, Val Loss: 0.086400 +2025-07-04 15:05:36,924 - INFO - Epoch 147/150 - Train Loss: 0.093196, Val Loss: 0.086339 +2025-07-04 15:05:54,688 - INFO - Epoch 148/150 - Train Loss: 0.092870, Val Loss: 0.086243 +2025-07-04 15:06:12,465 - INFO - Epoch 149/150 - Train Loss: 0.092973, Val Loss: 0.086341 +2025-07-04 15:06:30,243 - INFO - Epoch 150/150 - Train Loss: 0.093295, Val Loss: 0.086364 +2025-07-04 15:06:30,393 - INFO - Final model saved to experiments/Train_Test/final_model_tmp +2025-07-04 15:06:30,403 - INFO - Testing the final model +2025-07-04 15:06:30,403 - INFO - Testing the best model +2025-07-04 15:15:36,984 - INFO - args.exp_name : Train_Test +2025-07-04 15:15:36,990 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-04 15:15:36,990 - INFO - Starting training with 1 GPUs +2025-07-04 15:15:41,726 - INFO - Total trainable parameters: 1437705 +2025-07-04 15:15:41,874 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-04 15:15:41,878 - INFO - Staring training for 150 epochs +2025-07-04 15:16:02,360 - INFO - Epoch 1/150 - Train Loss: 0.765392, Val Loss: 1.117532 +2025-07-04 15:16:02,391 
- INFO - New best model saved with Val Loss: 1.117532 +2025-07-04 15:16:20,170 - INFO - Epoch 2/150 - Train Loss: 0.425968, Val Loss: 0.480214 +2025-07-04 15:16:20,186 - INFO - New best model saved with Val Loss: 0.480214 +2025-07-04 15:16:37,996 - INFO - Epoch 3/150 - Train Loss: 0.359811, Val Loss: 0.444877 +2025-07-04 15:16:38,011 - INFO - New best model saved with Val Loss: 0.444877 +2025-07-04 15:16:55,774 - INFO - Epoch 4/150 - Train Loss: 0.312085, Val Loss: 0.590544 +2025-07-04 15:17:13,544 - INFO - Epoch 5/150 - Train Loss: 0.284683, Val Loss: 0.833210 +2025-07-04 15:17:31,339 - INFO - Epoch 6/150 - Train Loss: 0.266364, Val Loss: 1.044873 +2025-07-04 15:17:49,153 - INFO - Epoch 7/150 - Train Loss: 0.244517, Val Loss: 0.458772 +2025-07-04 15:18:06,932 - INFO - Epoch 8/150 - Train Loss: 0.227804, Val Loss: 1.118219 +2025-07-04 15:18:24,732 - INFO - Epoch 9/150 - Train Loss: 0.222722, Val Loss: 0.292218 +2025-07-04 15:18:24,749 - INFO - New best model saved with Val Loss: 0.292218 +2025-07-04 15:18:42,524 - INFO - Epoch 10/150 - Train Loss: 0.213346, Val Loss: 0.264306 +2025-07-04 15:18:42,539 - INFO - New best model saved with Val Loss: 0.264306 +2025-07-04 15:19:00,432 - INFO - Epoch 11/150 - Train Loss: 0.198541, Val Loss: 1.010461 +2025-07-04 15:19:18,210 - INFO - Epoch 12/150 - Train Loss: 0.188102, Val Loss: 0.402349 +2025-07-04 15:19:35,960 - INFO - Epoch 13/150 - Train Loss: 0.190205, Val Loss: 0.351225 +2025-07-04 15:19:53,716 - INFO - Epoch 14/150 - Train Loss: 0.188799, Val Loss: 0.817092 +2025-07-04 15:20:11,474 - INFO - Epoch 15/150 - Train Loss: 0.181614, Val Loss: 0.227752 +2025-07-04 15:20:11,491 - INFO - New best model saved with Val Loss: 0.227752 +2025-07-04 15:20:29,255 - INFO - Epoch 16/150 - Train Loss: 0.177835, Val Loss: 0.312880 +2025-07-04 15:20:47,059 - INFO - Epoch 17/150 - Train Loss: 0.174542, Val Loss: 0.204217 +2025-07-04 15:20:47,074 - INFO - New best model saved with Val Loss: 0.204217 +2025-07-04 15:21:04,848 - INFO - Epoch 
18/150 - Train Loss: 0.171377, Val Loss: 0.178336 +2025-07-04 15:21:04,862 - INFO - New best model saved with Val Loss: 0.178336 +2025-07-04 15:21:22,641 - INFO - Epoch 19/150 - Train Loss: 0.167564, Val Loss: 0.218340 +2025-07-04 15:21:40,380 - INFO - Epoch 20/150 - Train Loss: 0.167842, Val Loss: 0.332284 +2025-07-04 15:21:58,255 - INFO - Epoch 21/150 - Train Loss: 0.163059, Val Loss: 0.209619 +2025-07-04 15:22:16,009 - INFO - Epoch 22/150 - Train Loss: 0.160918, Val Loss: 0.248925 +2025-07-04 15:22:33,781 - INFO - Epoch 23/150 - Train Loss: 0.157175, Val Loss: 0.624631 +2025-07-04 15:22:51,781 - INFO - Epoch 24/150 - Train Loss: 0.156025, Val Loss: 0.170956 +2025-07-04 15:22:51,799 - INFO - New best model saved with Val Loss: 0.170956 +2025-07-04 15:23:09,662 - INFO - Epoch 25/150 - Train Loss: 0.153771, Val Loss: 0.321865 +2025-07-04 15:23:27,404 - INFO - Epoch 26/150 - Train Loss: 0.148996, Val Loss: 0.149375 +2025-07-04 15:23:27,419 - INFO - New best model saved with Val Loss: 0.149375 +2025-07-04 15:23:45,201 - INFO - Epoch 27/150 - Train Loss: 0.149616, Val Loss: 0.178216 +2025-07-04 15:24:02,950 - INFO - Epoch 28/150 - Train Loss: 0.154057, Val Loss: 0.161862 +2025-07-04 15:24:20,713 - INFO - Epoch 29/150 - Train Loss: 0.146227, Val Loss: 0.175928 +2025-07-04 15:24:38,497 - INFO - Epoch 30/150 - Train Loss: 0.146135, Val Loss: 0.377274 +2025-07-04 15:24:56,388 - INFO - Epoch 31/150 - Train Loss: 0.145402, Val Loss: 0.213034 +2025-07-04 15:25:14,147 - INFO - Epoch 32/150 - Train Loss: 0.142771, Val Loss: 0.151741 +2025-07-04 15:25:31,947 - INFO - Epoch 33/150 - Train Loss: 0.145088, Val Loss: 0.146101 +2025-07-04 15:25:31,962 - INFO - New best model saved with Val Loss: 0.146101 +2025-07-04 15:25:49,724 - INFO - Epoch 34/150 - Train Loss: 0.139568, Val Loss: 0.273251 +2025-07-04 15:26:07,553 - INFO - Epoch 35/150 - Train Loss: 0.140397, Val Loss: 0.148392 +2025-07-04 15:26:25,346 - INFO - Epoch 36/150 - Train Loss: 0.136741, Val Loss: 0.150139 +2025-07-04 
15:26:43,163 - INFO - Epoch 37/150 - Train Loss: 0.135322, Val Loss: 0.148857 +2025-07-04 15:27:00,933 - INFO - Epoch 38/150 - Train Loss: 0.132527, Val Loss: 0.130883 +2025-07-04 15:27:00,950 - INFO - New best model saved with Val Loss: 0.130883 +2025-07-04 15:27:18,727 - INFO - Epoch 39/150 - Train Loss: 0.135555, Val Loss: 0.149993 +2025-07-04 15:27:36,506 - INFO - Epoch 40/150 - Train Loss: 0.135385, Val Loss: 0.164116 +2025-07-04 15:27:54,408 - INFO - Epoch 41/150 - Train Loss: 0.132284, Val Loss: 0.146587 +2025-07-04 15:28:12,224 - INFO - Epoch 42/150 - Train Loss: 0.133476, Val Loss: 0.166374 +2025-07-04 15:28:29,993 - INFO - Epoch 43/150 - Train Loss: 0.132089, Val Loss: 0.138480 +2025-07-04 15:28:47,812 - INFO - Epoch 44/150 - Train Loss: 0.131016, Val Loss: 0.134061 +2025-07-04 15:29:05,585 - INFO - Epoch 45/150 - Train Loss: 0.131406, Val Loss: 0.128516 +2025-07-04 15:29:05,602 - INFO - New best model saved with Val Loss: 0.128516 +2025-07-04 15:29:23,388 - INFO - Epoch 46/150 - Train Loss: 0.127681, Val Loss: 0.202615 +2025-07-04 15:29:41,156 - INFO - Epoch 47/150 - Train Loss: 0.126269, Val Loss: 0.136413 +2025-07-04 15:29:58,934 - INFO - Epoch 48/150 - Train Loss: 0.128555, Val Loss: 0.211257 +2025-07-04 15:30:16,712 - INFO - Epoch 49/150 - Train Loss: 0.128324, Val Loss: 0.134678 +2025-07-04 15:30:34,450 - INFO - Epoch 50/150 - Train Loss: 0.125648, Val Loss: 0.164913 +2025-07-04 15:30:52,325 - INFO - Epoch 51/150 - Train Loss: 0.124183, Val Loss: 0.121055 +2025-07-04 15:30:52,341 - INFO - New best model saved with Val Loss: 0.121055 +2025-07-04 15:31:10,131 - INFO - Epoch 52/150 - Train Loss: 0.125603, Val Loss: 0.207307 +2025-07-04 15:31:27,893 - INFO - Epoch 53/150 - Train Loss: 0.122962, Val Loss: 0.134787 +2025-07-04 15:31:45,644 - INFO - Epoch 54/150 - Train Loss: 0.122586, Val Loss: 0.152079 +2025-07-04 15:32:03,411 - INFO - Epoch 55/150 - Train Loss: 0.122021, Val Loss: 0.123622 +2025-07-04 15:32:21,178 - INFO - Epoch 56/150 - Train Loss: 
0.121857, Val Loss: 0.212121 +2025-07-04 15:32:38,954 - INFO - Epoch 57/150 - Train Loss: 0.123328, Val Loss: 0.150480 +2025-07-04 15:32:56,734 - INFO - Epoch 58/150 - Train Loss: 0.122286, Val Loss: 0.167857 +2025-07-04 15:33:14,497 - INFO - Epoch 59/150 - Train Loss: 0.121922, Val Loss: 0.140145 +2025-07-04 15:33:32,267 - INFO - Epoch 60/150 - Train Loss: 0.117993, Val Loss: 0.169449 +2025-07-04 15:33:50,172 - INFO - Epoch 61/150 - Train Loss: 0.120024, Val Loss: 0.118396 +2025-07-04 15:33:50,188 - INFO - New best model saved with Val Loss: 0.118396 +2025-07-04 15:34:07,961 - INFO - Epoch 62/150 - Train Loss: 0.120212, Val Loss: 0.193534 +2025-07-04 15:34:25,740 - INFO - Epoch 63/150 - Train Loss: 0.121789, Val Loss: 0.137048 +2025-07-04 15:34:43,498 - INFO - Epoch 64/150 - Train Loss: 0.119106, Val Loss: 0.115732 +2025-07-04 15:34:43,515 - INFO - New best model saved with Val Loss: 0.115732 +2025-07-04 15:35:01,323 - INFO - Epoch 65/150 - Train Loss: 0.116286, Val Loss: 0.150539 +2025-07-04 15:35:19,109 - INFO - Epoch 66/150 - Train Loss: 0.116608, Val Loss: 0.204416 +2025-07-04 15:35:37,762 - INFO - Epoch 67/150 - Train Loss: 0.116302, Val Loss: 0.160922 +2025-07-04 15:35:55,688 - INFO - Epoch 68/150 - Train Loss: 0.120537, Val Loss: 0.140712 +2025-07-04 15:36:13,459 - INFO - Epoch 69/150 - Train Loss: 0.116008, Val Loss: 0.133827 +2025-07-04 15:36:31,233 - INFO - Epoch 70/150 - Train Loss: 0.117500, Val Loss: 0.125367 +2025-07-04 15:36:49,083 - INFO - Epoch 71/150 - Train Loss: 0.115497, Val Loss: 0.111867 +2025-07-04 15:36:49,097 - INFO - New best model saved with Val Loss: 0.111867 +2025-07-04 15:37:06,811 - INFO - Epoch 72/150 - Train Loss: 0.113848, Val Loss: 0.149078 +2025-07-04 15:37:24,545 - INFO - Epoch 73/150 - Train Loss: 0.115463, Val Loss: 0.112712 +2025-07-04 15:37:42,269 - INFO - Epoch 74/150 - Train Loss: 0.112697, Val Loss: 0.117975 +2025-07-04 15:38:00,035 - INFO - Epoch 75/150 - Train Loss: 0.116695, Val Loss: 0.156708 +2025-07-04 
15:38:17,799 - INFO - Epoch 76/150 - Train Loss: 0.115100, Val Loss: 0.130494 +2025-07-04 15:38:35,572 - INFO - Epoch 77/150 - Train Loss: 0.112540, Val Loss: 0.115179 +2025-07-04 15:38:53,355 - INFO - Epoch 78/150 - Train Loss: 0.111732, Val Loss: 0.116926 +2025-07-04 15:39:11,108 - INFO - Epoch 79/150 - Train Loss: 0.110727, Val Loss: 0.126542 +2025-07-04 15:39:28,843 - INFO - Epoch 80/150 - Train Loss: 0.111588, Val Loss: 0.117037 +2025-07-04 15:39:46,704 - INFO - Epoch 81/150 - Train Loss: 0.111193, Val Loss: 0.166504 +2025-07-04 15:40:04,431 - INFO - Epoch 82/150 - Train Loss: 0.111560, Val Loss: 0.212419 +2025-07-04 15:40:22,223 - INFO - Epoch 83/150 - Train Loss: 0.103109, Val Loss: 0.092613 +2025-07-04 15:40:22,238 - INFO - New best model saved with Val Loss: 0.092613 +2025-07-04 15:40:40,028 - INFO - Epoch 84/150 - Train Loss: 0.098936, Val Loss: 0.090915 +2025-07-04 15:40:40,042 - INFO - New best model saved with Val Loss: 0.090915 +2025-07-04 15:40:57,829 - INFO - Epoch 85/150 - Train Loss: 0.097921, Val Loss: 0.090856 +2025-07-04 15:40:57,843 - INFO - New best model saved with Val Loss: 0.090856 +2025-07-04 15:41:15,643 - INFO - Epoch 86/150 - Train Loss: 0.097712, Val Loss: 0.091261 +2025-07-04 15:41:33,427 - INFO - Epoch 87/150 - Train Loss: 0.097985, Val Loss: 0.089322 +2025-07-04 15:41:33,442 - INFO - New best model saved with Val Loss: 0.089322 +2025-07-04 15:41:51,196 - INFO - Epoch 88/150 - Train Loss: 0.097777, Val Loss: 0.090429 +2025-07-04 15:42:09,003 - INFO - Epoch 89/150 - Train Loss: 0.097160, Val Loss: 0.090603 +2025-07-04 15:42:26,775 - INFO - Epoch 90/150 - Train Loss: 0.097306, Val Loss: 0.090642 +2025-07-04 15:42:44,630 - INFO - Epoch 91/150 - Train Loss: 0.096543, Val Loss: 0.092955 +2025-07-04 15:43:02,436 - INFO - Epoch 92/150 - Train Loss: 0.097235, Val Loss: 0.090075 +2025-07-04 15:43:20,208 - INFO - Epoch 93/150 - Train Loss: 0.096742, Val Loss: 0.090382 +2025-07-04 15:43:37,988 - INFO - Epoch 94/150 - Train Loss: 0.095817, Val 
Loss: 0.090097 +2025-07-04 15:43:55,726 - INFO - Epoch 95/150 - Train Loss: 0.096246, Val Loss: 0.089551 +2025-07-04 15:44:13,447 - INFO - Epoch 96/150 - Train Loss: 0.095710, Val Loss: 0.088552 +2025-07-04 15:44:13,463 - INFO - New best model saved with Val Loss: 0.088552 +2025-07-04 15:44:32,161 - INFO - Epoch 97/150 - Train Loss: 0.096374, Val Loss: 0.089566 +2025-07-04 15:44:49,989 - INFO - Epoch 98/150 - Train Loss: 0.096752, Val Loss: 0.088469 +2025-07-04 15:44:50,004 - INFO - New best model saved with Val Loss: 0.088469 +2025-07-04 15:45:07,732 - INFO - Epoch 99/150 - Train Loss: 0.093261, Val Loss: 0.089550 +2025-07-04 15:45:25,488 - INFO - Epoch 100/150 - Train Loss: 0.095612, Val Loss: 0.096346 +2025-07-04 15:45:43,338 - INFO - Epoch 101/150 - Train Loss: 0.096476, Val Loss: 0.128738 +2025-07-04 15:46:01,074 - INFO - Epoch 102/150 - Train Loss: 0.096039, Val Loss: 0.090330 +2025-07-04 15:46:18,846 - INFO - Epoch 103/150 - Train Loss: 0.094943, Val Loss: 0.120177 +2025-07-04 15:46:37,916 - INFO - Epoch 104/150 - Train Loss: 0.095132, Val Loss: 0.088605 +2025-07-04 15:46:55,702 - INFO - Epoch 105/150 - Train Loss: 0.095240, Val Loss: 0.088804 +2025-07-04 15:47:13,555 - INFO - Epoch 106/150 - Train Loss: 0.095847, Val Loss: 0.092196 +2025-07-04 15:47:31,325 - INFO - Epoch 107/150 - Train Loss: 0.095251, Val Loss: 0.097188 +2025-07-04 15:47:49,111 - INFO - Epoch 108/150 - Train Loss: 0.096115, Val Loss: 0.095816 +2025-07-04 15:48:06,856 - INFO - Epoch 109/150 - Train Loss: 0.095379, Val Loss: 0.088785 +2025-07-04 15:48:24,609 - INFO - Epoch 110/150 - Train Loss: 0.094124, Val Loss: 0.086502 +2025-07-04 15:48:24,624 - INFO - New best model saved with Val Loss: 0.086502 +2025-07-04 15:48:42,492 - INFO - Epoch 111/150 - Train Loss: 0.093858, Val Loss: 0.087059 +2025-07-04 15:49:00,278 - INFO - Epoch 112/150 - Train Loss: 0.094762, Val Loss: 0.086803 +2025-07-04 15:49:18,019 - INFO - Epoch 113/150 - Train Loss: 0.094220, Val Loss: 0.086305 +2025-07-04 
15:49:18,034 - INFO - New best model saved with Val Loss: 0.086305 +2025-07-04 15:49:35,785 - INFO - Epoch 114/150 - Train Loss: 0.093979, Val Loss: 0.086554 +2025-07-04 15:49:53,555 - INFO - Epoch 115/150 - Train Loss: 0.093502, Val Loss: 0.086534 +2025-07-04 15:50:11,325 - INFO - Epoch 116/150 - Train Loss: 0.093854, Val Loss: 0.086689 +2025-07-04 15:50:29,207 - INFO - Epoch 117/150 - Train Loss: 0.093443, Val Loss: 0.086520 +2025-07-04 15:50:46,971 - INFO - Epoch 118/150 - Train Loss: 0.093456, Val Loss: 0.086879 +2025-07-04 15:51:04,737 - INFO - Epoch 119/150 - Train Loss: 0.093429, Val Loss: 0.086754 +2025-07-04 15:51:22,596 - INFO - Epoch 120/150 - Train Loss: 0.093038, Val Loss: 0.086636 +2025-07-04 15:51:40,490 - INFO - Epoch 121/150 - Train Loss: 0.093188, Val Loss: 0.086384 +2025-07-04 15:51:58,259 - INFO - Epoch 122/150 - Train Loss: 0.093174, Val Loss: 0.086662 +2025-07-04 15:52:16,027 - INFO - Epoch 123/150 - Train Loss: 0.093458, Val Loss: 0.086432 +2025-07-04 15:52:34,006 - INFO - Epoch 124/150 - Train Loss: 0.093784, Val Loss: 0.086574 +2025-07-04 15:52:51,859 - INFO - Epoch 125/150 - Train Loss: 0.093038, Val Loss: 0.086059 +2025-07-04 15:52:51,874 - INFO - New best model saved with Val Loss: 0.086059 +2025-07-04 15:53:09,642 - INFO - Epoch 126/150 - Train Loss: 0.091458, Val Loss: 0.086364 +2025-07-04 15:53:27,428 - INFO - Epoch 127/150 - Train Loss: 0.092800, Val Loss: 0.086594 +2025-07-04 15:53:45,191 - INFO - Epoch 128/150 - Train Loss: 0.093453, Val Loss: 0.086473 +2025-07-04 15:54:02,948 - INFO - Epoch 129/150 - Train Loss: 0.092883, Val Loss: 0.086414 +2025-07-04 15:54:20,730 - INFO - Epoch 130/150 - Train Loss: 0.093339, Val Loss: 0.086173 +2025-07-04 15:54:38,633 - INFO - Epoch 131/150 - Train Loss: 0.093019, Val Loss: 0.086383 +2025-07-04 15:54:56,401 - INFO - Epoch 132/150 - Train Loss: 0.093502, Val Loss: 0.086444 +2025-07-04 15:55:14,151 - INFO - Epoch 133/150 - Train Loss: 0.093330, Val Loss: 0.086222 +2025-07-04 15:55:31,917 - INFO - 
Epoch 134/150 - Train Loss: 0.093427, Val Loss: 0.086623 +2025-07-04 15:55:49,679 - INFO - Epoch 135/150 - Train Loss: 0.093817, Val Loss: 0.086278 +2025-07-04 15:56:07,471 - INFO - Epoch 136/150 - Train Loss: 0.093156, Val Loss: 0.086409 +2025-07-04 15:56:25,275 - INFO - Epoch 137/150 - Train Loss: 0.092966, Val Loss: 0.086330 +2025-07-04 15:56:43,680 - INFO - Epoch 138/150 - Train Loss: 0.093484, Val Loss: 0.086202 +2025-07-04 15:57:01,421 - INFO - Epoch 139/150 - Train Loss: 0.092633, Val Loss: 0.086555 +2025-07-04 15:57:19,162 - INFO - Epoch 140/150 - Train Loss: 0.092953, Val Loss: 0.086549 +2025-07-04 15:57:37,046 - INFO - Epoch 141/150 - Train Loss: 0.092855, Val Loss: 0.086349 +2025-07-04 15:57:54,789 - INFO - Epoch 142/150 - Train Loss: 0.093116, Val Loss: 0.086281 +2025-07-04 15:58:12,604 - INFO - Epoch 143/150 - Train Loss: 0.093006, Val Loss: 0.086377 +2025-07-04 15:58:30,350 - INFO - Epoch 144/150 - Train Loss: 0.092932, Val Loss: 0.086349 +2025-07-04 15:58:48,127 - INFO - Epoch 145/150 - Train Loss: 0.092390, Val Loss: 0.086420 +2025-07-04 15:59:05,911 - INFO - Epoch 146/150 - Train Loss: 0.093185, Val Loss: 0.086400 +2025-07-04 15:59:23,714 - INFO - Epoch 147/150 - Train Loss: 0.093196, Val Loss: 0.086339 +2025-07-04 15:59:43,290 - INFO - Epoch 148/150 - Train Loss: 0.092870, Val Loss: 0.086243 +2025-07-04 16:00:01,047 - INFO - Epoch 149/150 - Train Loss: 0.092973, Val Loss: 0.086341 +2025-07-04 16:00:18,859 - INFO - Epoch 150/150 - Train Loss: 0.093295, Val Loss: 0.086364 +2025-07-04 16:00:19,000 - INFO - Final model saved to experiments/Train_Test/final_model_tmp +2025-07-04 16:00:19,001 - INFO - Testing the final model +2025-07-04 16:00:19,001 - INFO - Testing the best model +2025-07-05 08:18:54,313 - INFO - args.exp_name : Train_Test +2025-07-05 08:18:54,316 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', 
subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, test_only=False, num_workers=1, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-05 08:18:54,316 - INFO - Starting training with 1 GPUs +2025-07-05 08:19:02,149 - INFO - Total trainable parameters: 1437705 +2025-07-05 08:19:02,385 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-05 08:19:02,387 - INFO - Staring training for 150 epochs +2025-07-05 08:19:54,268 - INFO - Epoch 1/150 - Train Loss: 0.752180, Val Loss: 1.124732 +2025-07-05 08:19:54,315 - INFO - New best model saved with Val Loss: 1.124732 +2025-07-05 08:20:41,753 - INFO - Epoch 2/150 - Train Loss: 0.410244, Val Loss: 0.379351 +2025-07-05 08:20:41,780 - INFO - New best model saved with Val Loss: 0.379351 +2025-07-05 08:21:29,276 - INFO - Epoch 3/150 - Train Loss: 0.348636, Val Loss: 0.409999 +2025-07-05 08:21:43,964 - INFO - args.exp_name : Train_Test +2025-07-05 08:21:43,964 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, test_only=False, num_workers=1, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-05 08:21:43,964 - INFO - Starting training with 1 GPUs +2025-07-05 08:21:46,385 - INFO - Total trainable parameters: 1437705 +2025-07-05 08:21:46,570 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-05 08:21:46,571 - INFO - Staring training for 150 epochs +2025-07-05 08:22:35,918 - INFO - Epoch 1/150 - Train Loss: 0.752180, Val Loss: 1.124732 +2025-07-05 
08:22:35,949 - INFO - New best model saved with Val Loss: 1.124732 +2025-07-05 08:35:12,858 - INFO - args.exp_name : Train_Test +2025-07-05 08:35:12,859 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, test_only=False, num_workers=1, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-05 08:35:12,859 - INFO - Starting training with 1 GPUs +2025-07-05 08:35:15,213 - INFO - Total trainable parameters: 1437705 +2025-07-05 08:35:15,398 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-05 08:35:15,413 - INFO - Staring training for 150 epochs +2025-07-05 08:36:29,926 - INFO - args.exp_name : Train_Test +2025-07-05 08:36:29,930 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, test_only=False, num_workers=1, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-05 08:36:29,930 - INFO - Starting training with 1 GPUs +2025-07-05 08:36:32,281 - INFO - Total trainable parameters: 1437705 +2025-07-05 08:36:32,466 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-05 08:36:32,468 - INFO - Staring training for 150 epochs +2025-07-05 08:37:21,723 - INFO - Epoch 1/150 - Train Loss: 0.752180, Val Loss: 1.124732 +2025-07-05 08:37:21,754 - INFO - New best model saved with Val Loss: 1.124732 +2025-07-05 08:38:17,877 - INFO - args.exp_name 
: Train_Test +2025-07-05 08:38:17,877 - INFO - Arguments: Namespace(exp_name='Train_Test', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, test_only=True, num_workers=1, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-05 08:38:17,877 - INFO - Starting training with 1 GPUs +2025-07-05 08:38:20,218 - INFO - Total trainable parameters: 1437705 +2025-07-05 08:38:20,404 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-05 08:38:20,406 - INFO - Loading best model for testing only +2025-07-05 08:38:27,928 - INFO - Total MSE across all processes: 60.050479888916016 +2025-07-05 08:38:27,931 - INFO - mean value for all_targets: {tmp} +2025-07-05 08:38:27,936 - INFO - Test MSE: 1.112046, Test MAE: 0.720236, Max AE: 20.402550, Test R2: 0.0066 +2025-07-05 08:38:27,936 - INFO - Relative L2 Error: 0.997422, Relative L1 error: 1.113837 +2025-07-05 08:38:27,936 - INFO - Total inference time: 0.75s for 54 samples +2025-07-05 09:05:29,111 - INFO - args.exp_name : Train_Test +2025-07-05 09:05:29,115 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'epochs': 150, + 'exp_name': 'Train_Test', + 'gpus': '0', + 'k': 40, + 'lr': 0.001, + 'num_points': 10000, + 'num_workers': 1, + 'output_channels': 1, + 'seed': 1, + 'subset_dir': '/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', + 'test_only': 0} +2025-07-05 09:05:29,115 - INFO - Starting training with 1 GPUs +2025-07-05 09:05:31,379 - INFO - Total trainable parameters: 1437705 
+2025-07-05 09:05:31,565 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-05 09:05:31,566 - INFO - Staring training for 150 epochs +2025-07-05 09:06:21,263 - INFO - Epoch 1/150 - Train Loss: 0.752180, Val Loss: 1.124732 +2025-07-05 09:06:21,292 - INFO - New best model saved with Val Loss: 1.124732 +2025-07-05 09:07:08,792 - INFO - Epoch 2/150 - Train Loss: 0.410244, Val Loss: 0.379351 +2025-07-05 09:07:08,819 - INFO - New best model saved with Val Loss: 0.379351 +2025-07-05 09:07:56,373 - INFO - Epoch 3/150 - Train Loss: 0.348636, Val Loss: 0.409999 +2025-07-05 09:08:43,886 - INFO - Epoch 4/150 - Train Loss: 0.313532, Val Loss: 0.295374 +2025-07-05 09:08:43,913 - INFO - New best model saved with Val Loss: 0.295374 +2025-07-05 09:09:31,494 - INFO - Epoch 5/150 - Train Loss: 0.286809, Val Loss: 0.481297 +2025-07-05 09:10:19,029 - INFO - Epoch 6/150 - Train Loss: 0.267345, Val Loss: 3.256105 +2025-07-05 09:11:06,563 - INFO - Epoch 7/150 - Train Loss: 0.248175, Val Loss: 0.297765 +2025-07-05 09:11:54,074 - INFO - Epoch 8/150 - Train Loss: 0.233266, Val Loss: 0.295982 +2025-07-05 09:12:41,595 - INFO - Epoch 9/150 - Train Loss: 0.223489, Val Loss: 0.655006 +2025-07-05 09:13:29,133 - INFO - Epoch 10/150 - Train Loss: 0.215305, Val Loss: 0.423533 +2025-07-05 09:14:16,998 - INFO - Epoch 11/150 - Train Loss: 0.203425, Val Loss: 0.388887 +2025-07-05 09:15:04,533 - INFO - Epoch 12/150 - Train Loss: 0.193738, Val Loss: 0.255016 +2025-07-05 09:15:04,561 - INFO - New best model saved with Val Loss: 0.255016 +2025-07-05 09:15:52,090 - INFO - Epoch 13/150 - Train Loss: 0.193163, Val Loss: 0.278204 +2025-07-05 09:16:39,612 - INFO - Epoch 14/150 - Train Loss: 0.191126, Val Loss: 0.221406 +2025-07-05 09:16:39,640 - INFO - New best model saved with Val Loss: 0.221406 +2025-07-05 09:17:27,161 - INFO - Epoch 15/150 - Train Loss: 0.184478, Val Loss: 0.215028 +2025-07-05 09:17:27,188 - INFO - New best model saved with Val Loss: 0.215028 +2025-07-05 
09:18:14,711 - INFO - Epoch 16/150 - Train Loss: 0.178109, Val Loss: 0.325122 +2025-07-05 09:19:02,243 - INFO - Epoch 17/150 - Train Loss: 0.174796, Val Loss: 0.258391 +2025-07-05 09:19:49,737 - INFO - Epoch 18/150 - Train Loss: 0.174784, Val Loss: 0.225220 +2025-07-05 09:20:37,282 - INFO - Epoch 19/150 - Train Loss: 0.169611, Val Loss: 0.248846 +2025-07-05 09:21:24,805 - INFO - Epoch 20/150 - Train Loss: 0.167436, Val Loss: 0.471155 +2025-07-05 09:22:12,493 - INFO - Epoch 21/150 - Train Loss: 0.163698, Val Loss: 0.192040 +2025-07-05 09:22:12,520 - INFO - New best model saved with Val Loss: 0.192040 +2025-07-05 09:23:00,044 - INFO - Epoch 22/150 - Train Loss: 0.161416, Val Loss: 0.370374 +2025-07-05 09:23:47,686 - INFO - Epoch 23/150 - Train Loss: 0.158124, Val Loss: 0.158734 +2025-07-05 09:23:47,714 - INFO - New best model saved with Val Loss: 0.158734 +2025-07-05 09:24:35,246 - INFO - Epoch 24/150 - Train Loss: 0.155866, Val Loss: 0.205952 +2025-07-05 09:25:22,831 - INFO - Epoch 25/150 - Train Loss: 0.155378, Val Loss: 0.184193 +2025-07-05 09:26:10,362 - INFO - Epoch 26/150 - Train Loss: 0.150835, Val Loss: 0.167461 +2025-07-05 09:26:57,922 - INFO - Epoch 27/150 - Train Loss: 0.149848, Val Loss: 0.153055 +2025-07-05 09:26:57,949 - INFO - New best model saved with Val Loss: 0.153055 +2025-07-05 09:27:45,468 - INFO - Epoch 28/150 - Train Loss: 0.154199, Val Loss: 0.136550 +2025-07-05 09:27:45,495 - INFO - New best model saved with Val Loss: 0.136550 +2025-07-05 09:28:33,022 - INFO - Epoch 29/150 - Train Loss: 0.143742, Val Loss: 0.152760 +2025-07-05 09:29:20,569 - INFO - Epoch 30/150 - Train Loss: 0.144288, Val Loss: 0.193623 +2025-07-05 09:30:08,342 - INFO - Epoch 31/150 - Train Loss: 0.145189, Val Loss: 0.166961 +2025-07-05 09:30:55,884 - INFO - Epoch 32/150 - Train Loss: 0.141460, Val Loss: 0.157264 +2025-07-05 09:31:43,446 - INFO - Epoch 33/150 - Train Loss: 0.145188, Val Loss: 0.219487 +2025-07-05 09:32:30,967 - INFO - Epoch 34/150 - Train Loss: 0.138041, Val 
Loss: 0.179359 +2025-07-05 09:33:18,515 - INFO - Epoch 35/150 - Train Loss: 0.139006, Val Loss: 0.147838 +2025-07-05 09:34:06,066 - INFO - Epoch 36/150 - Train Loss: 0.133996, Val Loss: 0.173444 +2025-07-05 09:34:53,576 - INFO - Epoch 37/150 - Train Loss: 0.133981, Val Loss: 0.150009 +2025-07-05 09:35:41,059 - INFO - Epoch 38/150 - Train Loss: 0.130496, Val Loss: 0.138145 +2025-07-05 09:36:28,629 - INFO - Epoch 39/150 - Train Loss: 0.134783, Val Loss: 0.167007 +2025-07-05 09:37:16,187 - INFO - Epoch 40/150 - Train Loss: 0.123822, Val Loss: 0.106565 +2025-07-05 09:37:16,216 - INFO - New best model saved with Val Loss: 0.106565 +2025-07-05 09:38:03,875 - INFO - Epoch 41/150 - Train Loss: 0.119583, Val Loss: 0.107501 +2025-07-05 09:38:51,459 - INFO - Epoch 42/150 - Train Loss: 0.119099, Val Loss: 0.107230 +2025-07-05 09:39:39,038 - INFO - Epoch 43/150 - Train Loss: 0.118426, Val Loss: 0.105269 +2025-07-05 09:39:39,064 - INFO - New best model saved with Val Loss: 0.105269 +2025-07-05 09:40:26,571 - INFO - Epoch 44/150 - Train Loss: 0.117692, Val Loss: 0.106267 +2025-07-05 09:41:14,138 - INFO - Epoch 45/150 - Train Loss: 0.116777, Val Loss: 0.105962 +2025-07-05 09:42:01,693 - INFO - Epoch 46/150 - Train Loss: 0.117086, Val Loss: 0.104997 +2025-07-05 09:42:01,719 - INFO - New best model saved with Val Loss: 0.104997 +2025-07-05 09:42:49,292 - INFO - Epoch 47/150 - Train Loss: 0.116806, Val Loss: 0.104811 +2025-07-05 09:42:49,319 - INFO - New best model saved with Val Loss: 0.104811 +2025-07-05 09:43:36,862 - INFO - Epoch 48/150 - Train Loss: 0.116484, Val Loss: 0.103583 +2025-07-05 09:43:36,891 - INFO - New best model saved with Val Loss: 0.103583 +2025-07-05 09:44:24,407 - INFO - Epoch 49/150 - Train Loss: 0.115938, Val Loss: 0.105950 +2025-07-05 09:45:11,992 - INFO - Epoch 50/150 - Train Loss: 0.115517, Val Loss: 0.107515 +2025-07-05 09:45:59,657 - INFO - Epoch 51/150 - Train Loss: 0.115423, Val Loss: 0.103660 +2025-07-05 09:46:47,252 - INFO - Epoch 52/150 - Train 
Loss: 0.115276, Val Loss: 0.104326 +2025-07-05 09:47:34,792 - INFO - Epoch 53/150 - Train Loss: 0.114554, Val Loss: 0.103713 +2025-07-05 09:48:22,381 - INFO - Epoch 54/150 - Train Loss: 0.114361, Val Loss: 0.106248 +2025-07-05 09:49:09,944 - INFO - Epoch 55/150 - Train Loss: 0.115308, Val Loss: 0.104109 +2025-07-05 09:49:57,500 - INFO - Epoch 56/150 - Train Loss: 0.114578, Val Loss: 0.105009 +2025-07-05 09:50:45,066 - INFO - Epoch 57/150 - Train Loss: 0.115096, Val Loss: 0.103757 +2025-07-05 09:51:32,578 - INFO - Epoch 58/150 - Train Loss: 0.114655, Val Loss: 0.106341 +2025-07-05 09:52:20,147 - INFO - Epoch 59/150 - Train Loss: 0.115159, Val Loss: 0.107205 +2025-07-05 09:53:07,650 - INFO - Epoch 60/150 - Train Loss: 0.112579, Val Loss: 0.101850 +2025-07-05 09:53:07,677 - INFO - New best model saved with Val Loss: 0.101850 +2025-07-05 09:53:55,400 - INFO - Epoch 61/150 - Train Loss: 0.112142, Val Loss: 0.101521 +2025-07-05 09:53:55,426 - INFO - New best model saved with Val Loss: 0.101521 +2025-07-05 09:54:42,992 - INFO - Epoch 62/150 - Train Loss: 0.112451, Val Loss: 0.101256 +2025-07-05 09:54:43,019 - INFO - New best model saved with Val Loss: 0.101256 +2025-07-05 09:55:30,626 - INFO - Epoch 63/150 - Train Loss: 0.112573, Val Loss: 0.101790 +2025-07-05 09:56:18,194 - INFO - Epoch 64/150 - Train Loss: 0.112334, Val Loss: 0.101467 +2025-07-05 09:57:05,733 - INFO - Epoch 65/150 - Train Loss: 0.112359, Val Loss: 0.101630 +2025-07-05 09:57:53,276 - INFO - Epoch 66/150 - Train Loss: 0.112317, Val Loss: 0.101602 +2025-07-05 09:58:40,808 - INFO - Epoch 67/150 - Train Loss: 0.111656, Val Loss: 0.101648 +2025-07-05 09:59:28,301 - INFO - Epoch 68/150 - Train Loss: 0.112325, Val Loss: 0.101424 +2025-07-05 10:00:15,806 - INFO - Epoch 69/150 - Train Loss: 0.112211, Val Loss: 0.101313 +2025-07-05 10:01:03,351 - INFO - Epoch 70/150 - Train Loss: 0.111580, Val Loss: 0.101064 +2025-07-05 10:01:03,379 - INFO - New best model saved with Val Loss: 0.101064 +2025-07-05 10:01:51,085 - 
INFO - Epoch 71/150 - Train Loss: 0.111994, Val Loss: 0.101423 +2025-07-05 10:02:38,643 - INFO - Epoch 72/150 - Train Loss: 0.111675, Val Loss: 0.101559 +2025-07-05 10:03:26,191 - INFO - Epoch 73/150 - Train Loss: 0.112274, Val Loss: 0.101257 +2025-07-05 10:04:13,772 - INFO - Epoch 74/150 - Train Loss: 0.111927, Val Loss: 0.101388 +2025-07-05 10:05:01,360 - INFO - Epoch 75/150 - Train Loss: 0.112268, Val Loss: 0.101364 +2025-07-05 10:05:48,903 - INFO - Epoch 76/150 - Train Loss: 0.111703, Val Loss: 0.101176 +2025-07-05 10:06:36,423 - INFO - Epoch 77/150 - Train Loss: 0.111602, Val Loss: 0.100929 +2025-07-05 10:06:36,451 - INFO - New best model saved with Val Loss: 0.100929 +2025-07-05 10:07:23,960 - INFO - Epoch 78/150 - Train Loss: 0.110812, Val Loss: 0.101396 +2025-07-05 10:08:11,463 - INFO - Epoch 79/150 - Train Loss: 0.111649, Val Loss: 0.101161 +2025-07-05 10:08:58,997 - INFO - Epoch 80/150 - Train Loss: 0.111938, Val Loss: 0.101558 +2025-07-05 10:09:46,777 - INFO - Epoch 81/150 - Train Loss: 0.111773, Val Loss: 0.100997 +2025-07-05 10:10:34,312 - INFO - Epoch 82/150 - Train Loss: 0.111186, Val Loss: 0.101250 +2025-07-05 10:11:21,918 - INFO - Epoch 83/150 - Train Loss: 0.112286, Val Loss: 0.101473 +2025-07-05 10:12:09,423 - INFO - Epoch 84/150 - Train Loss: 0.111227, Val Loss: 0.101389 +2025-07-05 10:12:56,933 - INFO - Epoch 85/150 - Train Loss: 0.111354, Val Loss: 0.101104 +2025-07-05 10:13:44,466 - INFO - Epoch 86/150 - Train Loss: 0.111357, Val Loss: 0.101038 +2025-07-05 10:14:31,975 - INFO - Epoch 87/150 - Train Loss: 0.111645, Val Loss: 0.100999 +2025-07-05 10:15:19,477 - INFO - Epoch 88/150 - Train Loss: 0.112036, Val Loss: 0.100978 +2025-07-05 10:16:06,975 - INFO - Epoch 89/150 - Train Loss: 0.111140, Val Loss: 0.100872 +2025-07-05 10:16:07,003 - INFO - New best model saved with Val Loss: 0.100872 +2025-07-05 10:16:54,502 - INFO - Epoch 90/150 - Train Loss: 0.111373, Val Loss: 0.101360 +2025-07-05 10:17:42,210 - INFO - Epoch 91/150 - Train Loss: 
0.110803, Val Loss: 0.100967 +2025-07-05 10:18:29,740 - INFO - Epoch 92/150 - Train Loss: 0.111550, Val Loss: 0.101122 +2025-07-05 10:19:17,273 - INFO - Epoch 93/150 - Train Loss: 0.111214, Val Loss: 0.101079 +2025-07-05 10:20:04,824 - INFO - Epoch 94/150 - Train Loss: 0.110622, Val Loss: 0.101070 +2025-07-05 10:20:52,447 - INFO - Epoch 95/150 - Train Loss: 0.111025, Val Loss: 0.100847 +2025-07-05 10:20:52,474 - INFO - New best model saved with Val Loss: 0.100847 +2025-07-05 10:21:40,006 - INFO - Epoch 96/150 - Train Loss: 0.110931, Val Loss: 0.100946 +2025-07-05 10:22:27,493 - INFO - Epoch 97/150 - Train Loss: 0.111296, Val Loss: 0.101126 +2025-07-05 10:23:15,018 - INFO - Epoch 98/150 - Train Loss: 0.111636, Val Loss: 0.100806 +2025-07-05 10:23:15,045 - INFO - New best model saved with Val Loss: 0.100806 +2025-07-05 10:24:02,599 - INFO - Epoch 99/150 - Train Loss: 0.107761, Val Loss: 0.100924 +2025-07-05 10:24:50,190 - INFO - Epoch 100/150 - Train Loss: 0.110888, Val Loss: 0.100864 +2025-07-05 10:25:37,899 - INFO - Epoch 101/150 - Train Loss: 0.111423, Val Loss: 0.100831 +2025-07-05 10:26:25,455 - INFO - Epoch 102/150 - Train Loss: 0.111504, Val Loss: 0.100779 +2025-07-05 10:26:25,482 - INFO - New best model saved with Val Loss: 0.100779 +2025-07-05 10:27:13,042 - INFO - Epoch 103/150 - Train Loss: 0.110363, Val Loss: 0.100840 +2025-07-05 10:28:00,566 - INFO - Epoch 104/150 - Train Loss: 0.111284, Val Loss: 0.100882 +2025-07-05 10:28:48,112 - INFO - Epoch 105/150 - Train Loss: 0.110993, Val Loss: 0.101063 +2025-07-05 10:29:35,613 - INFO - Epoch 106/150 - Train Loss: 0.111335, Val Loss: 0.100919 +2025-07-05 10:30:23,151 - INFO - Epoch 107/150 - Train Loss: 0.111190, Val Loss: 0.101027 +2025-07-05 10:31:10,660 - INFO - Epoch 108/150 - Train Loss: 0.111392, Val Loss: 0.101150 +2025-07-05 10:31:58,218 - INFO - Epoch 109/150 - Train Loss: 0.111451, Val Loss: 0.100945 +2025-07-05 10:32:45,791 - INFO - Epoch 110/150 - Train Loss: 0.111334, Val Loss: 0.100793 +2025-07-05 
10:33:33,524 - INFO - Epoch 111/150 - Train Loss: 0.111340, Val Loss: 0.100805 +2025-07-05 10:34:21,024 - INFO - Epoch 112/150 - Train Loss: 0.112088, Val Loss: 0.100891 +2025-07-05 10:35:08,561 - INFO - Epoch 113/150 - Train Loss: 0.112194, Val Loss: 0.100758 +2025-07-05 10:35:08,588 - INFO - New best model saved with Val Loss: 0.100758 +2025-07-05 10:35:56,132 - INFO - Epoch 114/150 - Train Loss: 0.111665, Val Loss: 0.101036 +2025-07-05 10:36:43,730 - INFO - Epoch 115/150 - Train Loss: 0.111541, Val Loss: 0.100667 +2025-07-05 10:36:43,756 - INFO - New best model saved with Val Loss: 0.100667 +2025-07-05 10:37:31,257 - INFO - Epoch 116/150 - Train Loss: 0.111737, Val Loss: 0.101022 +2025-07-05 10:38:18,751 - INFO - Epoch 117/150 - Train Loss: 0.111449, Val Loss: 0.100828 +2025-07-05 10:39:06,293 - INFO - Epoch 118/150 - Train Loss: 0.110879, Val Loss: 0.101010 +2025-07-05 10:39:53,904 - INFO - Epoch 119/150 - Train Loss: 0.111162, Val Loss: 0.101130 +2025-07-05 10:40:41,430 - INFO - Epoch 120/150 - Train Loss: 0.110652, Val Loss: 0.101108 +2025-07-05 10:41:29,112 - INFO - Epoch 121/150 - Train Loss: 0.111208, Val Loss: 0.101022 +2025-07-05 10:42:16,678 - INFO - Epoch 122/150 - Train Loss: 0.111119, Val Loss: 0.100764 +2025-07-05 10:43:04,218 - INFO - Epoch 123/150 - Train Loss: 0.111224, Val Loss: 0.100755 +2025-07-05 10:43:51,739 - INFO - Epoch 124/150 - Train Loss: 0.111680, Val Loss: 0.100771 +2025-07-05 10:44:39,273 - INFO - Epoch 125/150 - Train Loss: 0.111103, Val Loss: 0.100727 +2025-07-05 10:45:26,844 - INFO - Epoch 126/150 - Train Loss: 0.109218, Val Loss: 0.101016 +2025-07-05 10:46:14,413 - INFO - Epoch 127/150 - Train Loss: 0.110773, Val Loss: 0.100918 +2025-07-05 10:47:01,956 - INFO - Epoch 128/150 - Train Loss: 0.111515, Val Loss: 0.100885 +2025-07-05 10:47:49,473 - INFO - Epoch 129/150 - Train Loss: 0.110828, Val Loss: 0.100919 +2025-07-05 10:48:37,032 - INFO - Epoch 130/150 - Train Loss: 0.111200, Val Loss: 0.100731 +2025-07-05 10:49:24,727 - INFO - 
Epoch 131/150 - Train Loss: 0.111076, Val Loss: 0.100860 +2025-07-05 10:50:12,282 - INFO - Epoch 132/150 - Train Loss: 0.111362, Val Loss: 0.100931 +2025-07-05 10:50:59,801 - INFO - Epoch 133/150 - Train Loss: 0.111084, Val Loss: 0.100533 +2025-07-05 10:50:59,828 - INFO - New best model saved with Val Loss: 0.100533 +2025-07-05 10:51:47,400 - INFO - Epoch 134/150 - Train Loss: 0.111459, Val Loss: 0.100953 +2025-07-05 10:52:34,987 - INFO - Epoch 135/150 - Train Loss: 0.111771, Val Loss: 0.100807 +2025-07-05 10:53:22,459 - INFO - Epoch 136/150 - Train Loss: 0.110902, Val Loss: 0.100884 +2025-07-05 10:54:09,966 - INFO - Epoch 137/150 - Train Loss: 0.110943, Val Loss: 0.101010 +2025-07-05 10:54:57,551 - INFO - Epoch 138/150 - Train Loss: 0.111562, Val Loss: 0.100699 +2025-07-05 10:55:45,106 - INFO - Epoch 139/150 - Train Loss: 0.110941, Val Loss: 0.101229 +2025-07-05 10:56:32,679 - INFO - Epoch 140/150 - Train Loss: 0.111054, Val Loss: 0.100888 +2025-07-05 10:57:20,367 - INFO - Epoch 141/150 - Train Loss: 0.111287, Val Loss: 0.100840 +2025-07-05 10:58:07,869 - INFO - Epoch 142/150 - Train Loss: 0.111119, Val Loss: 0.100915 +2025-07-05 10:58:55,481 - INFO - Epoch 143/150 - Train Loss: 0.111254, Val Loss: 0.101041 +2025-07-05 10:59:43,026 - INFO - Epoch 144/150 - Train Loss: 0.111139, Val Loss: 0.100839 +2025-07-05 11:00:30,547 - INFO - Epoch 145/150 - Train Loss: 0.110496, Val Loss: 0.101012 +2025-07-05 11:01:18,069 - INFO - Epoch 146/150 - Train Loss: 0.111641, Val Loss: 0.100863 +2025-07-05 11:02:05,625 - INFO - Epoch 147/150 - Train Loss: 0.111523, Val Loss: 0.100461 +2025-07-05 11:02:05,653 - INFO - New best model saved with Val Loss: 0.100461 +2025-07-05 11:02:53,170 - INFO - Epoch 148/150 - Train Loss: 0.110912, Val Loss: 0.100984 +2025-07-05 11:03:40,723 - INFO - Epoch 149/150 - Train Loss: 0.110507, Val Loss: 0.100842 +2025-07-05 11:04:28,263 - INFO - Epoch 150/150 - Train Loss: 0.111389, Val Loss: 0.100568 +2025-07-05 11:04:28,493 - INFO - Final model saved to 
experiments/Train_Test/final_model_pth +2025-07-05 11:04:28,494 - INFO - Testing the final model +2025-07-05 11:04:28,494 - INFO - Testing the best model +2025-07-05 11:54:21,673 - INFO - args.exp_name : Train_Test +2025-07-05 11:54:21,674 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'epochs': 150, + 'exp_name': 'Train_Test', + 'gpus': '0', + 'k': 40, + 'lr': 0.001, + 'num_points': 10000, + 'num_workers': 1, + 'output_channels': 1, + 'seed': 1, + 'subset_dir': '/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', + 'test_only': 1} +2025-07-05 11:54:21,674 - INFO - Starting training with 1 GPUs +2025-07-05 11:54:25,203 - INFO - Total trainable parameters: 1437705 +2025-07-05 11:54:25,387 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-05 11:54:25,388 - INFO - Loading best model for testing only +2025-07-05 11:54:33,609 - INFO - Total MSE across all processes: 5.320414066314697 +2025-07-05 11:54:33,613 - INFO - mean value for all_targets: {tmp} +2025-07-05 11:54:33,616 - INFO - Test MSE: 0.098526, Test MAE: 0.177215, Max AE: 17.375210, Test R2: 0.9120 +2025-07-05 11:54:33,616 - INFO - Relative L2 Error: 0.295874, Relative L1 error: 0.273636 +2025-07-05 11:54:33,617 - INFO - Total inference time: 0.81s for 54 samples +2025-07-09 08:12:45,741 - INFO - args.exp_name : Train_Test +2025-07-09 08:12:45,744 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'epochs': 150, + 'exp_name': 'Train_Test', + 'gpus': '0', + 'k': 40, + 'lr': 0.001, + 'num_points': 10000, + 'num_workers': 1, + 
'output_channels': 1, + 'seed': 1, + 'subset_dir': '/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', + 'test_only': 1} +2025-07-09 08:12:45,744 - INFO - Starting training with 1 GPUs +2025-07-09 08:12:53,213 - INFO - Total trainable parameters: 1437705 +2025-07-09 08:12:53,445 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-09 08:12:53,447 - INFO - Loading best model for testing only +2025-07-09 08:13:01,631 - INFO - Total MSE across all processes: 5.320414066314697 +2025-07-09 08:13:01,635 - INFO - mean value for all_targets: {tmp} +2025-07-09 08:13:01,640 - INFO - Test MSE: 0.098526, Test MAE: 0.177215, Max AE: 17.375210, Test R2: 0.9120 +2025-07-09 08:13:01,640 - INFO - Relative L2 Error: 0.295874, Relative L1 error: 0.273636 +2025-07-09 08:13:01,640 - INFO - Total inference time: 0.91s for 54 samples +2025-07-09 08:13:21,719 - INFO - args.exp_name : Train_Test +2025-07-09 08:13:21,720 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'epochs': 150, + 'exp_name': 'Train_Test', + 'gpus': '0', + 'k': 40, + 'lr': 0.001, + 'num_points': 10000, + 'num_workers': 1, + 'output_channels': 1, + 'seed': 1, + 'subset_dir': '/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', + 'test_only': 0} +2025-07-09 08:13:21,721 - INFO - Starting training with 1 GPUs +2025-07-09 08:13:24,127 - INFO - Total trainable parameters: 1437705 +2025-07-09 08:13:24,313 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-09 08:13:24,314 - INFO - Staring training for 150 epochs +2025-07-09 08:13:48,894 - INFO - args.exp_name : Train_Test +2025-07-09 08:13:48,898 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': 
'/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'epochs': 150, + 'exp_name': 'Train_Test', + 'gpus': '0', + 'k': 40, + 'lr': 0.001, + 'num_points': 10000, + 'num_workers': 1, + 'output_channels': 1, + 'seed': 1, + 'subset_dir': '/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', + 'test_only': 0} +2025-07-09 08:13:48,898 - INFO - Starting training with 1 GPUs +2025-07-09 08:13:51,268 - INFO - Total trainable parameters: 1437705 +2025-07-09 08:13:51,456 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-09 08:13:51,457 - INFO - Staring training for 150 epochs +2025-07-09 08:14:42,651 - INFO - Epoch 1/150 - Train Loss: 0.752180, Val Loss: 1.124732 +2025-07-09 08:14:42,685 - INFO - New best model saved with Val Loss: 1.124732 +2025-07-09 08:15:31,699 - INFO - Epoch 2/150 - Train Loss: 0.410244, Val Loss: 0.379351 +2025-07-09 08:15:31,732 - INFO - New best model saved with Val Loss: 0.379351 +2025-07-09 08:16:20,480 - INFO - Epoch 3/150 - Train Loss: 0.348636, Val Loss: 0.409999 +2025-07-09 08:17:09,324 - INFO - Epoch 4/150 - Train Loss: 0.313532, Val Loss: 0.295374 +2025-07-09 08:17:09,356 - INFO - New best model saved with Val Loss: 0.295374 +2025-07-09 08:17:58,306 - INFO - Epoch 5/150 - Train Loss: 0.286809, Val Loss: 0.481297 +2025-07-09 08:18:47,103 - INFO - Epoch 6/150 - Train Loss: 0.267345, Val Loss: 3.256105 +2025-07-09 08:19:35,957 - INFO - Epoch 7/150 - Train Loss: 0.248175, Val Loss: 0.297765 +2025-07-09 08:20:24,732 - INFO - Epoch 8/150 - Train Loss: 0.233266, Val Loss: 0.295982 +2025-07-09 08:21:13,344 - INFO - Epoch 9/150 - Train Loss: 0.223489, Val Loss: 0.655006 +2025-07-09 08:22:01,981 - INFO - Epoch 10/150 - Train Loss: 0.215305, Val Loss: 0.423533 +2025-07-09 08:22:51,132 - INFO - Epoch 11/150 - Train Loss: 0.203425, Val Loss: 
0.388887 +2025-07-09 08:23:39,831 - INFO - Epoch 12/150 - Train Loss: 0.193738, Val Loss: 0.255016 +2025-07-09 08:23:39,864 - INFO - New best model saved with Val Loss: 0.255016 +2025-07-09 08:24:28,555 - INFO - Epoch 13/150 - Train Loss: 0.193163, Val Loss: 0.278204 +2025-07-09 08:25:17,321 - INFO - Epoch 14/150 - Train Loss: 0.191126, Val Loss: 0.221406 +2025-07-09 08:25:17,353 - INFO - New best model saved with Val Loss: 0.221406 +2025-07-09 08:26:06,045 - INFO - Epoch 15/150 - Train Loss: 0.184478, Val Loss: 0.215028 +2025-07-09 08:26:06,077 - INFO - New best model saved with Val Loss: 0.215028 +2025-07-09 08:26:54,948 - INFO - Epoch 16/150 - Train Loss: 0.178109, Val Loss: 0.325122 +2025-07-09 08:27:43,819 - INFO - Epoch 17/150 - Train Loss: 0.174796, Val Loss: 0.258391 +2025-07-09 08:28:32,672 - INFO - Epoch 18/150 - Train Loss: 0.174784, Val Loss: 0.225220 +2025-07-09 08:29:21,892 - INFO - Epoch 19/150 - Train Loss: 0.169611, Val Loss: 0.248846 +2025-07-09 08:30:11,040 - INFO - Epoch 20/150 - Train Loss: 0.167436, Val Loss: 0.471155 +2025-07-09 08:31:00,353 - INFO - Epoch 21/150 - Train Loss: 0.163698, Val Loss: 0.192040 +2025-07-09 08:31:00,385 - INFO - New best model saved with Val Loss: 0.192040 +2025-07-09 08:31:49,250 - INFO - Epoch 22/150 - Train Loss: 0.161416, Val Loss: 0.370374 +2025-07-09 08:32:38,200 - INFO - Epoch 23/150 - Train Loss: 0.158124, Val Loss: 0.158734 +2025-07-09 08:32:38,232 - INFO - New best model saved with Val Loss: 0.158734 +2025-07-09 08:33:27,027 - INFO - Epoch 24/150 - Train Loss: 0.155866, Val Loss: 0.205952 +2025-07-09 08:34:15,933 - INFO - Epoch 25/150 - Train Loss: 0.155378, Val Loss: 0.184193 +2025-07-09 08:35:04,546 - INFO - Epoch 26/150 - Train Loss: 0.150835, Val Loss: 0.167461 +2025-07-09 08:35:53,394 - INFO - Epoch 27/150 - Train Loss: 0.149848, Val Loss: 0.153055 +2025-07-09 08:35:53,440 - INFO - New best model saved with Val Loss: 0.153055 +2025-07-09 08:36:42,268 - INFO - Epoch 28/150 - Train Loss: 0.154199, Val 
Loss: 0.136550 +2025-07-09 08:36:42,300 - INFO - New best model saved with Val Loss: 0.136550 +2025-07-09 08:37:31,171 - INFO - Epoch 29/150 - Train Loss: 0.143742, Val Loss: 0.152760 +2025-07-09 08:38:19,864 - INFO - Epoch 30/150 - Train Loss: 0.144288, Val Loss: 0.193623 +2025-07-09 08:39:08,801 - INFO - Epoch 31/150 - Train Loss: 0.145189, Val Loss: 0.166961 +2025-07-09 08:39:57,423 - INFO - Epoch 32/150 - Train Loss: 0.141460, Val Loss: 0.157264 +2025-07-09 08:40:46,027 - INFO - Epoch 33/150 - Train Loss: 0.145188, Val Loss: 0.219487 +2025-07-09 08:41:34,963 - INFO - Epoch 34/150 - Train Loss: 0.138041, Val Loss: 0.179359 +2025-07-09 08:42:23,784 - INFO - Epoch 35/150 - Train Loss: 0.139006, Val Loss: 0.147838 +2025-07-09 08:43:12,535 - INFO - Epoch 36/150 - Train Loss: 0.133996, Val Loss: 0.173444 +2025-07-09 08:44:01,348 - INFO - Epoch 37/150 - Train Loss: 0.133981, Val Loss: 0.150009 +2025-07-09 08:44:50,005 - INFO - Epoch 38/150 - Train Loss: 0.130496, Val Loss: 0.138145 +2025-07-09 08:45:38,869 - INFO - Epoch 39/150 - Train Loss: 0.134783, Val Loss: 0.167007 +2025-07-09 08:46:27,678 - INFO - Epoch 40/150 - Train Loss: 0.123822, Val Loss: 0.106565 +2025-07-09 08:46:27,711 - INFO - New best model saved with Val Loss: 0.106565 +2025-07-09 08:47:16,782 - INFO - Epoch 41/150 - Train Loss: 0.119583, Val Loss: 0.107501 +2025-07-09 08:48:05,637 - INFO - Epoch 42/150 - Train Loss: 0.119099, Val Loss: 0.107230 +2025-07-09 08:48:54,599 - INFO - Epoch 43/150 - Train Loss: 0.118426, Val Loss: 0.105269 +2025-07-09 08:48:54,632 - INFO - New best model saved with Val Loss: 0.105269 +2025-07-09 08:49:43,417 - INFO - Epoch 44/150 - Train Loss: 0.117692, Val Loss: 0.106267 +2025-07-09 08:50:32,198 - INFO - Epoch 45/150 - Train Loss: 0.116777, Val Loss: 0.105962 +2025-07-09 08:51:20,803 - INFO - Epoch 46/150 - Train Loss: 0.117086, Val Loss: 0.104997 +2025-07-09 08:51:20,834 - INFO - New best model saved with Val Loss: 0.104997 +2025-07-09 08:52:09,930 - INFO - Epoch 47/150 - 
Train Loss: 0.116806, Val Loss: 0.104811 +2025-07-09 08:52:09,961 - INFO - New best model saved with Val Loss: 0.104811 +2025-07-09 08:52:58,763 - INFO - Epoch 48/150 - Train Loss: 0.116484, Val Loss: 0.103583 +2025-07-09 08:52:58,796 - INFO - New best model saved with Val Loss: 0.103583 +2025-07-09 08:53:47,377 - INFO - Epoch 49/150 - Train Loss: 0.115938, Val Loss: 0.105950 +2025-07-09 08:54:35,989 - INFO - Epoch 50/150 - Train Loss: 0.115517, Val Loss: 0.107515 +2025-07-09 08:55:24,969 - INFO - Epoch 51/150 - Train Loss: 0.115423, Val Loss: 0.103660 +2025-07-09 08:56:13,781 - INFO - Epoch 52/150 - Train Loss: 0.115276, Val Loss: 0.104326 +2025-07-09 08:57:02,501 - INFO - Epoch 53/150 - Train Loss: 0.114554, Val Loss: 0.103713 +2025-07-09 08:57:51,396 - INFO - Epoch 54/150 - Train Loss: 0.114361, Val Loss: 0.106248 +2025-07-09 08:58:40,243 - INFO - Epoch 55/150 - Train Loss: 0.115308, Val Loss: 0.104109 +2025-07-09 08:59:28,999 - INFO - Epoch 56/150 - Train Loss: 0.114578, Val Loss: 0.105009 +2025-07-09 09:00:17,853 - INFO - Epoch 57/150 - Train Loss: 0.115096, Val Loss: 0.103757 +2025-07-09 09:01:06,719 - INFO - Epoch 58/150 - Train Loss: 0.114655, Val Loss: 0.106341 +2025-07-09 09:01:55,380 - INFO - Epoch 59/150 - Train Loss: 0.115159, Val Loss: 0.107205 +2025-07-09 09:02:44,310 - INFO - Epoch 60/150 - Train Loss: 0.112579, Val Loss: 0.101850 +2025-07-09 09:02:44,345 - INFO - New best model saved with Val Loss: 0.101850 +2025-07-09 09:03:33,879 - INFO - Epoch 61/150 - Train Loss: 0.112142, Val Loss: 0.101521 +2025-07-09 09:03:33,913 - INFO - New best model saved with Val Loss: 0.101521 +2025-07-09 09:04:23,269 - INFO - Epoch 62/150 - Train Loss: 0.112451, Val Loss: 0.101256 +2025-07-09 09:04:23,301 - INFO - New best model saved with Val Loss: 0.101256 +2025-07-09 09:05:12,211 - INFO - Epoch 63/150 - Train Loss: 0.112573, Val Loss: 0.101790 +2025-07-09 09:06:01,360 - INFO - Epoch 64/150 - Train Loss: 0.112334, Val Loss: 0.101467 +2025-07-09 09:06:50,157 - INFO - 
Epoch 65/150 - Train Loss: 0.112359, Val Loss: 0.101630 +2025-07-09 09:07:38,968 - INFO - Epoch 66/150 - Train Loss: 0.112317, Val Loss: 0.101602 +2025-07-09 09:08:27,919 - INFO - Epoch 67/150 - Train Loss: 0.111656, Val Loss: 0.101648 +2025-07-09 09:09:16,830 - INFO - Epoch 68/150 - Train Loss: 0.112325, Val Loss: 0.101424 +2025-07-09 09:10:05,647 - INFO - Epoch 69/150 - Train Loss: 0.112211, Val Loss: 0.101313 +2025-07-09 09:10:54,272 - INFO - Epoch 70/150 - Train Loss: 0.111580, Val Loss: 0.101064 +2025-07-09 09:10:54,305 - INFO - New best model saved with Val Loss: 0.101064 +2025-07-09 09:11:43,350 - INFO - Epoch 71/150 - Train Loss: 0.111994, Val Loss: 0.101423 +2025-07-09 09:12:32,168 - INFO - Epoch 72/150 - Train Loss: 0.111675, Val Loss: 0.101559 +2025-07-09 09:13:21,079 - INFO - Epoch 73/150 - Train Loss: 0.112274, Val Loss: 0.101257 +2025-07-09 09:14:09,728 - INFO - Epoch 74/150 - Train Loss: 0.111927, Val Loss: 0.101388 +2025-07-09 09:14:58,585 - INFO - Epoch 75/150 - Train Loss: 0.112268, Val Loss: 0.101364 +2025-07-09 09:15:47,589 - INFO - Epoch 76/150 - Train Loss: 0.111703, Val Loss: 0.101176 +2025-07-09 09:16:36,445 - INFO - Epoch 77/150 - Train Loss: 0.111602, Val Loss: 0.100929 +2025-07-09 09:16:36,477 - INFO - New best model saved with Val Loss: 0.100929 +2025-07-09 09:17:25,334 - INFO - Epoch 78/150 - Train Loss: 0.110812, Val Loss: 0.101396 +2025-07-09 09:18:14,019 - INFO - Epoch 79/150 - Train Loss: 0.111649, Val Loss: 0.101161 +2025-07-09 09:19:02,757 - INFO - Epoch 80/150 - Train Loss: 0.111938, Val Loss: 0.101558 +2025-07-09 09:19:51,727 - INFO - Epoch 81/150 - Train Loss: 0.111773, Val Loss: 0.100997 +2025-07-09 09:20:40,549 - INFO - Epoch 82/150 - Train Loss: 0.111186, Val Loss: 0.101250 +2025-07-09 09:21:29,401 - INFO - Epoch 83/150 - Train Loss: 0.112286, Val Loss: 0.101473 +2025-07-09 09:22:18,229 - INFO - Epoch 84/150 - Train Loss: 0.111227, Val Loss: 0.101389 +2025-07-09 09:23:07,127 - INFO - Epoch 85/150 - Train Loss: 0.111354, Val 
Loss: 0.101104 +2025-07-09 09:23:55,918 - INFO - Epoch 86/150 - Train Loss: 0.111357, Val Loss: 0.101038 +2025-07-09 09:24:44,815 - INFO - Epoch 87/150 - Train Loss: 0.111645, Val Loss: 0.100999 +2025-07-09 09:25:33,442 - INFO - Epoch 88/150 - Train Loss: 0.112036, Val Loss: 0.100978 +2025-07-09 09:26:22,157 - INFO - Epoch 89/150 - Train Loss: 0.111140, Val Loss: 0.100872 +2025-07-09 09:26:22,207 - INFO - New best model saved with Val Loss: 0.100872 +2025-07-09 09:27:11,154 - INFO - Epoch 90/150 - Train Loss: 0.111373, Val Loss: 0.101360 +2025-07-09 09:28:00,090 - INFO - Epoch 91/150 - Train Loss: 0.110803, Val Loss: 0.100967 +2025-07-09 09:28:48,855 - INFO - Epoch 92/150 - Train Loss: 0.111550, Val Loss: 0.101122 +2025-07-09 09:29:37,549 - INFO - Epoch 93/150 - Train Loss: 0.111214, Val Loss: 0.101079 +2025-07-09 09:30:26,366 - INFO - Epoch 94/150 - Train Loss: 0.110622, Val Loss: 0.101070 +2025-07-09 09:31:15,236 - INFO - Epoch 95/150 - Train Loss: 0.111025, Val Loss: 0.100847 +2025-07-09 09:31:15,271 - INFO - New best model saved with Val Loss: 0.100847 +2025-07-09 09:32:04,036 - INFO - Epoch 96/150 - Train Loss: 0.110931, Val Loss: 0.100946 +2025-07-09 09:32:52,843 - INFO - Epoch 97/150 - Train Loss: 0.111296, Val Loss: 0.101126 +2025-07-09 09:33:41,599 - INFO - Epoch 98/150 - Train Loss: 0.111636, Val Loss: 0.100806 +2025-07-09 09:33:41,650 - INFO - New best model saved with Val Loss: 0.100806 +2025-07-09 09:34:30,477 - INFO - Epoch 99/150 - Train Loss: 0.107761, Val Loss: 0.100924 +2025-07-09 09:35:19,253 - INFO - Epoch 100/150 - Train Loss: 0.110888, Val Loss: 0.100864 +2025-07-09 09:36:08,332 - INFO - Epoch 101/150 - Train Loss: 0.111423, Val Loss: 0.100831 +2025-07-09 09:36:57,715 - INFO - Epoch 102/150 - Train Loss: 0.111504, Val Loss: 0.100779 +2025-07-09 09:36:57,762 - INFO - New best model saved with Val Loss: 0.100779 +2025-07-09 09:37:46,971 - INFO - Epoch 103/150 - Train Loss: 0.110363, Val Loss: 0.100840 +2025-07-09 09:38:35,800 - INFO - Epoch 
104/150 - Train Loss: 0.111284, Val Loss: 0.100882 +2025-07-09 09:39:24,661 - INFO - Epoch 105/150 - Train Loss: 0.110993, Val Loss: 0.101063 +2025-07-09 09:40:13,634 - INFO - Epoch 106/150 - Train Loss: 0.111335, Val Loss: 0.100919 +2025-07-09 09:41:02,375 - INFO - Epoch 107/150 - Train Loss: 0.111190, Val Loss: 0.101027 +2025-07-09 09:41:51,208 - INFO - Epoch 108/150 - Train Loss: 0.111392, Val Loss: 0.101150 +2025-07-09 09:42:39,946 - INFO - Epoch 109/150 - Train Loss: 0.111451, Val Loss: 0.100945 +2025-07-09 09:43:28,797 - INFO - Epoch 110/150 - Train Loss: 0.111334, Val Loss: 0.100793 +2025-07-09 09:44:17,610 - INFO - Epoch 111/150 - Train Loss: 0.111340, Val Loss: 0.100805 +2025-07-09 09:45:06,397 - INFO - Epoch 112/150 - Train Loss: 0.112088, Val Loss: 0.100891 +2025-07-09 09:45:55,106 - INFO - Epoch 113/150 - Train Loss: 0.112194, Val Loss: 0.100758 +2025-07-09 09:45:55,139 - INFO - New best model saved with Val Loss: 0.100758 +2025-07-09 09:46:43,754 - INFO - Epoch 114/150 - Train Loss: 0.111665, Val Loss: 0.101036 +2025-07-09 09:47:32,616 - INFO - Epoch 115/150 - Train Loss: 0.111541, Val Loss: 0.100667 +2025-07-09 09:47:32,648 - INFO - New best model saved with Val Loss: 0.100667 +2025-07-09 09:48:21,616 - INFO - Epoch 116/150 - Train Loss: 0.111737, Val Loss: 0.101022 +2025-07-09 09:49:10,375 - INFO - Epoch 117/150 - Train Loss: 0.111449, Val Loss: 0.100828 +2025-07-09 09:49:59,155 - INFO - Epoch 118/150 - Train Loss: 0.110879, Val Loss: 0.101010 +2025-07-09 09:50:47,968 - INFO - Epoch 119/150 - Train Loss: 0.111162, Val Loss: 0.101130 +2025-07-09 09:51:36,874 - INFO - Epoch 120/150 - Train Loss: 0.110652, Val Loss: 0.101108 +2025-07-09 09:52:25,778 - INFO - Epoch 121/150 - Train Loss: 0.111208, Val Loss: 0.101022 +2025-07-09 09:53:14,449 - INFO - Epoch 122/150 - Train Loss: 0.111119, Val Loss: 0.100764 +2025-07-09 09:54:03,137 - INFO - Epoch 123/150 - Train Loss: 0.111224, Val Loss: 0.100755 +2025-07-09 09:54:51,988 - INFO - Epoch 124/150 - Train Loss: 
0.111680, Val Loss: 0.100771 +2025-07-09 09:55:40,799 - INFO - Epoch 125/150 - Train Loss: 0.111103, Val Loss: 0.100727 +2025-07-09 09:56:29,909 - INFO - Epoch 126/150 - Train Loss: 0.109218, Val Loss: 0.101016 +2025-07-09 09:57:18,924 - INFO - Epoch 127/150 - Train Loss: 0.110773, Val Loss: 0.100918 +2025-07-09 09:58:09,431 - INFO - Epoch 128/150 - Train Loss: 0.111515, Val Loss: 0.100885 +2025-07-09 09:58:58,353 - INFO - Epoch 129/150 - Train Loss: 0.110828, Val Loss: 0.100919 +2025-07-09 09:59:47,305 - INFO - Epoch 130/150 - Train Loss: 0.111200, Val Loss: 0.100731 +2025-07-09 10:00:36,311 - INFO - Epoch 131/150 - Train Loss: 0.111076, Val Loss: 0.100860 +2025-07-09 10:01:26,388 - INFO - Epoch 132/150 - Train Loss: 0.111362, Val Loss: 0.100931 +2025-07-09 10:02:16,315 - INFO - Epoch 133/150 - Train Loss: 0.111084, Val Loss: 0.100533 +2025-07-09 10:02:16,349 - INFO - New best model saved with Val Loss: 0.100533 +2025-07-09 10:03:05,245 - INFO - Epoch 134/150 - Train Loss: 0.111459, Val Loss: 0.100953 +2025-07-09 10:03:54,164 - INFO - Epoch 135/150 - Train Loss: 0.111771, Val Loss: 0.100807 +2025-07-09 10:04:43,022 - INFO - Epoch 136/150 - Train Loss: 0.110902, Val Loss: 0.100884 +2025-07-09 10:05:31,644 - INFO - Epoch 137/150 - Train Loss: 0.110943, Val Loss: 0.101010 +2025-07-09 10:06:20,404 - INFO - Epoch 138/150 - Train Loss: 0.111562, Val Loss: 0.100699 +2025-07-09 10:07:09,323 - INFO - Epoch 139/150 - Train Loss: 0.110941, Val Loss: 0.101229 +2025-07-09 10:07:58,054 - INFO - Epoch 140/150 - Train Loss: 0.111054, Val Loss: 0.100888 +2025-07-09 10:08:47,049 - INFO - Epoch 141/150 - Train Loss: 0.111287, Val Loss: 0.100840 +2025-07-09 10:09:35,925 - INFO - Epoch 142/150 - Train Loss: 0.111119, Val Loss: 0.100915 +2025-07-09 10:10:25,153 - INFO - Epoch 143/150 - Train Loss: 0.111254, Val Loss: 0.101041 +2025-07-09 10:11:14,398 - INFO - Epoch 144/150 - Train Loss: 0.111139, Val Loss: 0.100839 +2025-07-09 10:12:03,353 - INFO - Epoch 145/150 - Train Loss: 0.110496, 
Val Loss: 0.101012 +2025-07-09 10:12:52,414 - INFO - Epoch 146/150 - Train Loss: 0.111641, Val Loss: 0.100863 +2025-07-09 10:13:41,575 - INFO - Epoch 147/150 - Train Loss: 0.111523, Val Loss: 0.100461 +2025-07-09 10:13:41,608 - INFO - New best model saved with Val Loss: 0.100461 +2025-07-09 10:14:30,504 - INFO - Epoch 148/150 - Train Loss: 0.110912, Val Loss: 0.100984 +2025-07-09 10:15:19,243 - INFO - Epoch 149/150 - Train Loss: 0.110507, Val Loss: 0.100842 +2025-07-09 10:16:08,076 - INFO - Epoch 150/150 - Train Loss: 0.111389, Val Loss: 0.100568 +2025-07-09 10:16:08,322 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-07-09 10:16:08,323 - INFO - Testing the final model +2025-07-09 10:16:08,323 - INFO - Testing the best model +2025-07-09 11:42:08,630 - INFO - args.exp_name : Train_Test +2025-07-09 11:42:08,633 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'epochs': 150, + 'exp_name': 'Train_Test', + 'gpus': '0', + 'k': 40, + 'lr': 0.001, + 'num_points': 10000, + 'num_workers': 1, + 'output_channels': 1, + 'seed': 1, + 'subset_dir': '/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', + 'test_only': 1} +2025-07-09 11:42:08,634 - INFO - Starting training with 1 GPUs +2025-07-09 11:42:11,968 - INFO - Total trainable parameters: 1437705 +2025-07-09 11:42:12,156 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-09 11:42:12,158 - INFO - Loading best model for testing only +2025-07-09 11:51:13,369 - INFO - args.exp_name : Train_Test +2025-07-09 11:51:13,371 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': 
'/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'epochs': 150, + 'exp_name': 'Train_Test', + 'gpus': '0', + 'k': 40, + 'lr': 0.001, + 'num_points': 10000, + 'num_workers': 1, + 'output_channels': 1, + 'seed': 1, + 'subset_dir': '/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', + 'test_only': 1} +2025-07-09 11:51:13,371 - INFO - Starting training with 1 GPUs +2025-07-09 11:51:15,819 - INFO - Total trainable parameters: 1437705 +2025-07-09 11:51:16,005 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-09 11:51:16,009 - INFO - Loading best model for testing only +2025-07-11 16:56:26,266 - INFO - args.exp_name : Train_Test +2025-07-11 16:56:26,268 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': '/work/mae-zhangbj/Data_Pressure/Cache_data', + 'dataset_path': '/work/mae-zhangbj/Data_Pressure/Pressure_VTK', + 'dropout': 0.4, + 'emb_dims': 1024, + 'epochs': 150, + 'exp_name': 'Train_Test', + 'gpus': '0', + 'k': 40, + 'lr': 0.001, + 'num_points': 100000, + 'num_workers': 1, + 'output_channels': 1, + 'seed': 1, + 'subset_dir': '/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', + 'test_only': 1} +2025-07-11 16:56:26,268 - INFO - Starting training with 1 GPUs +2025-07-11 16:56:32,445 - INFO - Total trainable parameters: 1437705 +2025-07-11 16:56:32,500 - INFO - Data loaded: 3 training batches, 1 validation batches, 1 test batches +2025-07-11 16:56:32,502 - INFO - Loading best model for testing only +2025-07-11 17:01:05,604 - INFO - args.exp_name : Train_Test +2025-07-11 17:01:05,605 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': '/work/mae-zhangbj/Data_Pressure/Cache_data', + 'dataset_path': '/work/mae-zhangbj/Data_Pressure/Pressure_VTK', + 'dropout': 0.4, + 'emb_dims': 1024, + 'epochs': 150, + 'exp_name': 'Train_Test', + 'gpus': '0', + 'k': 40, + 'lr': 0.001, + 'num_points': 10000, + 'num_workers': 1, + 
'output_channels': 1, + 'seed': 1, + 'subset_dir': '/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', + 'test_only': 1} +2025-07-11 17:01:05,605 - INFO - *******************************Starting training with 1 GPUs +2025-07-11 17:01:07,858 - INFO - Total trainable parameters: 1437705 +2025-07-11 17:01:07,878 - INFO - Data loaded: 3 training batches, 1 validation batches, 1 test batches +2025-07-11 17:01:07,880 - INFO - Loading best model for testing only +2025-07-11 17:01:18,411 - INFO - args.exp_name : Train_Test +2025-07-11 17:01:18,412 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': '/work/mae-zhangbj/Data_Pressure/Cache_data', + 'dataset_path': '/work/mae-zhangbj/Data_Pressure/Pressure_VTK', + 'dropout': 0.4, + 'emb_dims': 1024, + 'epochs': 150, + 'exp_name': 'Train_Test', + 'gpus': '0', + 'k': 40, + 'lr': 0.001, + 'num_points': 10000, + 'num_workers': 1, + 'output_channels': 1, + 'seed': 1, + 'subset_dir': '/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', + 'test_only': 1} +2025-07-11 17:01:18,412 - INFO - *******************************Starting training with 1 GPUs +2025-07-11 17:01:20,626 - INFO - Total trainable parameters: 1437705 +2025-07-11 17:01:20,645 - INFO - Data loaded: 3 training batches, 1 validation batches, 1 test batches +2025-07-11 17:01:20,646 - INFO - Loading best model for testing only +2025-07-11 17:12:07,338 - INFO - args.exp_name : Train_Test +2025-07-11 17:12:07,341 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': '/work/mae-zhangbj/Data_Pressure/Cache_data', + 'dataset_path': '/work/mae-zhangbj/Data_Pressure/Pressure_VTK', + 'dropout': 0.4, + 'emb_dims': 1024, + 'epochs': 20, + 'exp_name': 'Train_Test', + 'gpus': '0', + 'k': 40, + 'lr': 0.001, + 'num_points': 100000, + 'num_workers': 1, + 'output_channels': 1, + 'seed': 1, + 'subset_dir': '/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', + 'test_only': 1} +2025-07-11 17:12:07,341 - INFO - 
*******************************Starting training with 1 GPUs +2025-07-11 17:12:09,473 - INFO - Total trainable parameters: 1437705 +2025-07-11 17:12:09,492 - INFO - Data loaded: 3 training batches, 1 validation batches, 1 test batches +2025-07-11 17:12:09,493 - INFO - Loading best model for testing only +2025-07-11 17:15:01,687 - INFO - args.exp_name : Train_Test +2025-07-11 17:15:01,689 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': '/work/mae-zhangbj/Data_Pressure/Cache_data', + 'dataset_path': '/work/mae-zhangbj/Data_Pressure/Pressure_VTK', + 'dropout': 0.4, + 'emb_dims': 1024, + 'epochs': 20, + 'exp_name': 'Train_Test', + 'gpus': '0', + 'k': 40, + 'lr': 0.001, + 'num_points': 100000, + 'num_workers': 1, + 'output_channels': 1, + 'seed': 1, + 'subset_dir': '/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', + 'test_only': 1} +2025-07-11 17:15:01,689 - INFO - *******************************Starting training with 1 GPUs +2025-07-11 17:15:03,812 - INFO - Total trainable parameters: 1437705 +2025-07-11 17:15:03,832 - INFO - Data loaded: 3 training batches, 1 validation batches, 1 test batches +2025-07-11 17:15:03,834 - INFO - Loading best model for testing only +2025-07-11 17:15:08,181 - INFO - Total MSE across all processes: 0.5307230949401855 +2025-07-11 17:15:08,182 - INFO - mean value for all_targets: {tmp} +2025-07-11 17:15:08,183 - INFO - Test MSE: 0.088454, Test MAE: 0.171458, Max AE: 5.417973, Test R2: 0.9216 +2025-07-11 17:15:08,183 - INFO - Relative L2 Error: 0.279420, Relative L1 error: 0.261442 +2025-07-11 17:15:08,183 - INFO - Total inference time: 0.68s for 6 samples +2025-07-11 17:25:29,580 - INFO - args.exp_name : Train_Test +2025-07-11 17:25:29,583 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'epochs': 60, 
+ 'exp_name': 'Train_Test', + 'gpus': '0', + 'k': 40, + 'lr': 0.001, + 'num_points': 100000, + 'num_workers': 1, + 'output_channels': 1, + 'seed': 1, + 'subset_dir': '/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', + 'test_only': 1} +2025-07-11 17:25:29,583 - INFO - *******************************Starting training with 1 GPUs +2025-07-11 17:25:31,722 - INFO - Total trainable parameters: 1437705 +2025-07-11 17:25:31,905 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-11 17:25:31,908 - INFO - Loading best model for testing only +2025-07-11 17:25:39,176 - INFO - Total MSE across all processes: 5.320414066314697 +2025-07-11 17:25:39,179 - INFO - mean value for all_targets: {tmp} +2025-07-11 17:25:39,183 - INFO - Test MSE: 0.098526, Test MAE: 0.177215, Max AE: 17.375210, Test R2: 0.9120 +2025-07-11 17:25:39,184 - INFO - Relative L2 Error: 0.295874, Relative L1 error: 0.273636 +2025-07-11 17:25:39,184 - INFO - Total inference time: 0.72s for 54 samples +2025-07-11 17:26:19,152 - INFO - args.exp_name : Train_Test +2025-07-11 17:26:19,154 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'epochs': 60, + 'exp_name': 'Train_Test', + 'gpus': '0', + 'k': 40, + 'lr': 0.001, + 'num_points': 100000, + 'num_workers': 1, + 'output_channels': 1, + 'seed': 1, + 'subset_dir': '/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', + 'test_only': 0} +2025-07-11 17:26:19,155 - INFO - *******************************Starting training with 1 GPUs +2025-07-11 17:26:21,286 - INFO - Total trainable parameters: 1437705 +2025-07-11 17:26:21,469 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-11 17:26:21,470 - INFO - Staring training for 60 epochs +2025-07-11 17:27:09,728 
- INFO - Epoch 1/60 - Train Loss: 0.752180, Val Loss: 1.124732 +2025-07-11 17:27:09,751 - INFO - New best model saved with Val Loss: 1.124732 +2025-07-11 17:27:56,693 - INFO - Epoch 2/60 - Train Loss: 0.410244, Val Loss: 0.379351 +2025-07-11 17:27:56,714 - INFO - New best model saved with Val Loss: 0.379351 +2025-07-11 17:28:43,685 - INFO - Epoch 3/60 - Train Loss: 0.348636, Val Loss: 0.409999 +2025-07-11 17:29:30,610 - INFO - Epoch 4/60 - Train Loss: 0.313532, Val Loss: 0.295374 +2025-07-11 17:29:30,630 - INFO - New best model saved with Val Loss: 0.295374 +2025-07-11 17:30:17,560 - INFO - Epoch 5/60 - Train Loss: 0.286809, Val Loss: 0.481297 +2025-07-11 17:31:04,523 - INFO - Epoch 6/60 - Train Loss: 0.267345, Val Loss: 3.256105 +2025-07-11 17:31:51,521 - INFO - Epoch 7/60 - Train Loss: 0.248175, Val Loss: 0.297765 +2025-07-11 17:32:38,460 - INFO - Epoch 8/60 - Train Loss: 0.233266, Val Loss: 0.295982 +2025-07-11 17:33:25,381 - INFO - Epoch 9/60 - Train Loss: 0.223489, Val Loss: 0.655006 +2025-07-11 17:34:12,305 - INFO - Epoch 10/60 - Train Loss: 0.215305, Val Loss: 0.423533 +2025-07-11 17:34:59,514 - INFO - Epoch 11/60 - Train Loss: 0.203425, Val Loss: 0.388887 +2025-07-11 17:35:46,532 - INFO - Epoch 12/60 - Train Loss: 0.193738, Val Loss: 0.255016 +2025-07-11 17:35:46,552 - INFO - New best model saved with Val Loss: 0.255016 +2025-07-11 17:36:33,495 - INFO - Epoch 13/60 - Train Loss: 0.193163, Val Loss: 0.278204 +2025-07-11 17:37:20,463 - INFO - Epoch 14/60 - Train Loss: 0.191126, Val Loss: 0.221406 +2025-07-11 17:37:20,483 - INFO - New best model saved with Val Loss: 0.221406 +2025-07-11 17:38:07,485 - INFO - Epoch 15/60 - Train Loss: 0.184478, Val Loss: 0.215028 +2025-07-11 17:38:07,504 - INFO - New best model saved with Val Loss: 0.215028 +2025-07-11 17:38:54,483 - INFO - Epoch 16/60 - Train Loss: 0.178109, Val Loss: 0.325122 +2025-07-11 17:39:41,449 - INFO - Epoch 17/60 - Train Loss: 0.174796, Val Loss: 0.258391 +2025-07-11 17:40:28,423 - INFO - Epoch 18/60 
- Train Loss: 0.174784, Val Loss: 0.225220 +2025-07-11 17:41:15,439 - INFO - Epoch 19/60 - Train Loss: 0.169611, Val Loss: 0.248846 +2025-07-11 17:42:02,411 - INFO - Epoch 20/60 - Train Loss: 0.167436, Val Loss: 0.471155 +2025-07-11 17:42:49,559 - INFO - Epoch 21/60 - Train Loss: 0.163698, Val Loss: 0.192040 +2025-07-11 17:42:49,578 - INFO - New best model saved with Val Loss: 0.192040 +2025-07-11 17:43:36,543 - INFO - Epoch 22/60 - Train Loss: 0.161416, Val Loss: 0.370374 +2025-07-11 17:44:23,552 - INFO - Epoch 23/60 - Train Loss: 0.158124, Val Loss: 0.158734 +2025-07-11 17:44:23,571 - INFO - New best model saved with Val Loss: 0.158734 +2025-07-11 17:45:10,561 - INFO - Epoch 24/60 - Train Loss: 0.155866, Val Loss: 0.205952 +2025-07-11 17:45:57,538 - INFO - Epoch 25/60 - Train Loss: 0.155378, Val Loss: 0.184193 +2025-07-11 17:46:44,479 - INFO - Epoch 26/60 - Train Loss: 0.150835, Val Loss: 0.167461 +2025-07-11 17:47:31,465 - INFO - Epoch 27/60 - Train Loss: 0.149848, Val Loss: 0.153055 +2025-07-11 17:47:31,484 - INFO - New best model saved with Val Loss: 0.153055 +2025-07-11 17:48:18,469 - INFO - Epoch 28/60 - Train Loss: 0.154199, Val Loss: 0.136550 +2025-07-11 17:48:18,488 - INFO - New best model saved with Val Loss: 0.136550 +2025-07-11 17:49:05,482 - INFO - Epoch 29/60 - Train Loss: 0.143742, Val Loss: 0.152760 +2025-07-11 17:49:52,485 - INFO - Epoch 30/60 - Train Loss: 0.144288, Val Loss: 0.193623 +2025-07-11 17:50:39,619 - INFO - Epoch 31/60 - Train Loss: 0.145189, Val Loss: 0.166961 +2025-07-11 17:51:26,569 - INFO - Epoch 32/60 - Train Loss: 0.141460, Val Loss: 0.157264 +2025-07-11 17:52:13,543 - INFO - Epoch 33/60 - Train Loss: 0.145188, Val Loss: 0.219487 +2025-07-11 17:53:00,471 - INFO - Epoch 34/60 - Train Loss: 0.138041, Val Loss: 0.179359 +2025-07-11 17:53:47,462 - INFO - Epoch 35/60 - Train Loss: 0.139006, Val Loss: 0.147838 +2025-07-11 17:54:34,413 - INFO - Epoch 36/60 - Train Loss: 0.133996, Val Loss: 0.173444 +2025-07-11 17:55:21,377 - INFO - 
Epoch 37/60 - Train Loss: 0.133981, Val Loss: 0.150009 +2025-07-11 17:56:08,315 - INFO - Epoch 38/60 - Train Loss: 0.130496, Val Loss: 0.138145 +2025-07-11 17:56:55,270 - INFO - Epoch 39/60 - Train Loss: 0.134783, Val Loss: 0.167007 +2025-07-11 17:57:42,216 - INFO - Epoch 40/60 - Train Loss: 0.123822, Val Loss: 0.106565 +2025-07-11 17:57:42,236 - INFO - New best model saved with Val Loss: 0.106565 +2025-07-11 17:58:29,331 - INFO - Epoch 41/60 - Train Loss: 0.119583, Val Loss: 0.107501 +2025-07-11 17:59:16,265 - INFO - Epoch 42/60 - Train Loss: 0.119099, Val Loss: 0.107230 +2025-07-11 18:00:03,242 - INFO - Epoch 43/60 - Train Loss: 0.118426, Val Loss: 0.105269 +2025-07-11 18:00:03,262 - INFO - New best model saved with Val Loss: 0.105269 +2025-07-11 18:00:50,184 - INFO - Epoch 44/60 - Train Loss: 0.117692, Val Loss: 0.106267 +2025-07-11 18:01:37,119 - INFO - Epoch 45/60 - Train Loss: 0.116777, Val Loss: 0.105962 +2025-07-11 18:02:24,097 - INFO - Epoch 46/60 - Train Loss: 0.117086, Val Loss: 0.104997 +2025-07-11 18:02:24,115 - INFO - New best model saved with Val Loss: 0.104997 +2025-07-11 18:03:11,082 - INFO - Epoch 47/60 - Train Loss: 0.116806, Val Loss: 0.104811 +2025-07-11 18:03:11,100 - INFO - New best model saved with Val Loss: 0.104811 +2025-07-11 18:03:58,067 - INFO - Epoch 48/60 - Train Loss: 0.116484, Val Loss: 0.103583 +2025-07-11 18:03:58,086 - INFO - New best model saved with Val Loss: 0.103583 +2025-07-11 18:04:45,044 - INFO - Epoch 49/60 - Train Loss: 0.115938, Val Loss: 0.105950 +2025-07-11 18:05:31,951 - INFO - Epoch 50/60 - Train Loss: 0.115517, Val Loss: 0.107515 +2025-07-11 18:06:19,071 - INFO - Epoch 51/60 - Train Loss: 0.115423, Val Loss: 0.103660 +2025-07-11 18:07:06,050 - INFO - Epoch 52/60 - Train Loss: 0.115276, Val Loss: 0.104326 +2025-07-11 18:07:53,061 - INFO - Epoch 53/60 - Train Loss: 0.114554, Val Loss: 0.103713 +2025-07-11 18:08:39,981 - INFO - Epoch 54/60 - Train Loss: 0.114361, Val Loss: 0.106248 +2025-07-11 18:09:27,001 - INFO - 
Epoch 55/60 - Train Loss: 0.115308, Val Loss: 0.104109 +2025-07-11 18:10:13,971 - INFO - Epoch 56/60 - Train Loss: 0.114578, Val Loss: 0.105009 +2025-07-11 18:11:00,882 - INFO - Epoch 57/60 - Train Loss: 0.115096, Val Loss: 0.103757 +2025-07-11 18:11:47,837 - INFO - Epoch 58/60 - Train Loss: 0.114655, Val Loss: 0.106341 +2025-07-11 18:12:34,805 - INFO - Epoch 59/60 - Train Loss: 0.115159, Val Loss: 0.107205 +2025-07-11 18:13:21,738 - INFO - Epoch 60/60 - Train Loss: 0.112579, Val Loss: 0.101850 +2025-07-11 18:13:21,761 - INFO - New best model saved with Val Loss: 0.101850 +2025-07-11 18:13:21,975 - INFO - Final model saved to experiments/Train_Test/final_model_pth +2025-07-11 18:13:21,976 - INFO - Testing the final model +2025-07-11 18:13:21,976 - INFO - Testing the best model +2025-07-14 08:18:41,349 - INFO - args.exp_name : Train_Test +2025-07-14 08:18:41,354 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'epochs': 60, + 'exp_name': 'Train_Test', + 'gpus': '0', + 'k': 40, + 'lr': 0.001, + 'num_points': 100000, + 'num_workers': 1, + 'output_channels': 1, + 'seed': 1, + 'subset_dir': '/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', + 'test_only': 0} +2025-07-14 08:18:41,354 - INFO - *******************************Starting training with 1 GPUs +2025-07-14 08:18:49,310 - INFO - Total trainable parameters: 1437705 +2025-07-14 08:18:49,505 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-14 08:18:49,508 - INFO - Staring training for 60 epochs +2025-08-07 12:22:17,333 - INFO - args.exp_name : Train_Test +2025-08-07 12:22:17,346 - INFO - Arguments: +{ 'batch_size': 6, + 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': 
# model_pressure.py
"""
@author: Mohamed Elrefaie, mohamed.elrefaie@mit.edu

Model architecture for pressure field prediction on the DrivAerNet++ dataset.

This module implements the RegDGCNN model for predicting pressure fields
on 3D car models from the DrivAerNet++ dataset.
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init

import numpy as np


def knn(x, k):
    """Return the indices of the k nearest neighbours of every point.

    Args:
        x: Input tensor of shape (batch_size, feature_dim, num_points).
        k: Number of neighbours to consider.

    Returns:
        LongTensor of shape (batch_size, num_points, k) holding, for each
        point, the indices of its k nearest neighbours. Note the point
        itself is included (its distance to itself is zero).
    """
    # Negated squared Euclidean distance via the expansion
    # ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2; negation lets topk pick
    # the k smallest distances.
    inner = -2 * torch.matmul(x.transpose(2, 1), x)
    xx = torch.sum(x ** 2, dim=1, keepdim=True)
    pairwise_distance = -xx - inner - xx.transpose(2, 1)

    idx = pairwise_distance.topk(k=k, dim=-1)[1]  # (batch_size, num_points, k)
    return idx


def get_graph_feature(x, k=20, idx=None, dim9=False):
    """Construct edge features [x_j - x_i, x_i] for graph convolution.

    For each point x_i the k nearest neighbours x_j are gathered and the
    edge feature (x_j - x_i, x_i) is emitted, channels-first.

    Args:
        x: Input tensor of shape (batch_size, feature_dim, num_points).
        k: Number of neighbours used to build the graph.
        idx: Optional pre-computed neighbour indices of shape
            (batch_size, num_points, k); computed via knn() when None.
        dim9: When True, neighbours are computed from channels 6: only
            (the xyz coordinates in the 9-D per-point encoding used by
            the reference DGCNN segmentation model). Previously this flag
            was accepted but silently ignored.

    Returns:
        Edge features of shape (batch_size, 2*feature_dim, num_points, k).
    """
    batch_size = x.size(0)
    num_points = x.size(2)
    x = x.view(batch_size, -1, num_points)
    if idx is None:
        # Bug fix: honour dim9 instead of ignoring it — the reference
        # DGCNN computes neighbours on the coordinate channels only.
        idx = knn(x if not dim9 else x[:, 6:], k=k)  # (batch_size, num_points, k)

    device = x.device
    # Offset each batch's indices so one flat gather covers the whole batch.
    idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
    idx = idx + idx_base
    idx = idx.view(-1)

    _, num_dims, _ = x.size()
    x = x.transpose(2, 1).contiguous()  # (batch_size, num_points, num_dims)
    feature = x.view(batch_size * num_points, -1)[idx, :]
    feature = feature.view(batch_size, num_points, k, num_dims)
    x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)

    # Edge feature = (neighbour - centre, centre), moved to channels-first.
    feature = torch.cat((feature - x, x), dim=3).permute(0, 3, 1, 2).contiguous()
    return feature  # (batch_size, 2*num_dims, num_points, k)


class Transform_Net(nn.Module):
    """Spatial transformer predicting a per-sample 3x3 alignment matrix.

    The final linear layer is initialised to weight 0 and identity bias,
    so at the start of training the predicted transform is a no-op.
    """

    def __init__(self, args):
        super(Transform_Net, self).__init__()
        self.args = args
        self.k = 3

        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(128)
        self.bn3 = nn.BatchNorm1d(1024)

        self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=False),
                                   self.bn1,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv2 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=1, bias=False),
                                   self.bn2,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv3 = nn.Sequential(nn.Conv1d(128, 1024, kernel_size=1, bias=False),
                                   self.bn3,
                                   nn.LeakyReLU(negative_slope=0.2))

        self.linear1 = nn.Linear(1024, 512, bias=False)
        self.bn4 = nn.BatchNorm1d(512)
        self.linear2 = nn.Linear(512, 256, bias=False)
        self.bn5 = nn.BatchNorm1d(256)

        # Identity initialisation: zero weight means the output equals the
        # bias, which is set to the flattened 3x3 identity matrix.
        self.transform = nn.Linear(256, 3*3)
        init.constant_(self.transform.weight, 0)
        init.eye_(self.transform.bias.view(3, 3))

    def forward(self, x):
        """Predict a (batch_size, 3, 3) transform from edge features x.

        Args:
            x: Edge features of shape (batch_size, 6, num_points, k)
               as produced by get_graph_feature on raw xyz input.
        """
        batch_size = x.size(0)

        x = self.conv1(x)                       # (batch_size, 6, num_points, k)  -> (batch_size, 64, num_points, k)
        x = self.conv2(x)                       # (batch_size, 64, num_points, k) -> (batch_size, 128, num_points, k)
        x = x.max(dim=-1, keepdim=False)[0]     # max over neighbours -> (batch_size, 128, num_points)

        x = self.conv3(x)                       # (batch_size, 128, num_points) -> (batch_size, 1024, num_points)
        x = x.max(dim=-1, keepdim=False)[0]     # global max pool -> (batch_size, 1024)

        x = F.leaky_relu(self.bn4(self.linear1(x)), negative_slope=0.2)  # -> (batch_size, 512)
        x = F.leaky_relu(self.bn5(self.linear2(x)), negative_slope=0.2)  # -> (batch_size, 256)

        x = self.transform(x)                   # (batch_size, 256) -> (batch_size, 9)
        x = x.view(batch_size, 3, 3)            # -> (batch_size, 3, 3)

        return x


class RegDGCNN_pressure(nn.Module):
    # NOTE: this class definition continues beyond the current chunk of the file.
    def __init__(self, args):
        super(RegDGCNN_pressure, self).__init__()
        self.args = args
        self.seg_num_all = 1
        self.k = args['k']
        self.transform_net = Transform_Net(args)

        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(64)
        self.bn4 = nn.BatchNorm2d(64)
        self.bn5 = nn.BatchNorm2d(64)
        self.bn6 = nn.BatchNorm1d(args['emb_dims'])
        self.bn7 = nn.BatchNorm1d(64)
+ self.bn8 = nn.BatchNorm1d(256) + self.bn9 = nn.BatchNorm1d(256) + self.bn10 = nn.BatchNorm1d(128) + + self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=False), + self.bn1, + nn.LeakyReLU(negative_slope=0.2)) + self.conv2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=False), + self.bn2, + nn.LeakyReLU(negative_slope=0.2)) + self.conv3 = nn.Sequential(nn.Conv2d(64 * 2, 64, kernel_size=1, bias=False), + self.bn3, + nn.LeakyReLU(negative_slope=0.2)) + self.conv4 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=False), + self.bn4, + nn.LeakyReLU(negative_slope=0.2)) + self.conv5 = nn.Sequential(nn.Conv2d(64 * 2, 64, kernel_size=1, bias=False), + self.bn5, + nn.LeakyReLU(negative_slope=0.2)) + self.conv6 = nn.Sequential(nn.Conv1d(192, args['emb_dims'], kernel_size=1, bias=False), + self.bn6, + nn.LeakyReLU(negative_slope=0.2)) + self.conv7 = nn.Sequential(nn.Conv1d(16, 64, kernel_size=1, bias=False), + self.bn7, + nn.LeakyReLU(negative_slope=0.2)) + self.conv8 = nn.Sequential(nn.Conv1d(1216, 256, kernel_size=1, bias=False), + self.bn8, + nn.LeakyReLU(negative_slope=0.2)) + self.dp1 = nn.Dropout(p=args['dropout']) + self.conv9 = nn.Sequential(nn.Conv1d(256, 256, kernel_size=1, bias=False), + self.bn9, + nn.LeakyReLU(negative_slope=0.2)) + self.dp2 = nn.Dropout(p=args['dropout']) + self.conv10 = nn.Sequential(nn.Conv1d(256, 128, kernel_size=1, bias=False), + self.bn10, + nn.LeakyReLU(negative_slope=0.2)) + self.conv11 = nn.Conv1d(128, self.seg_num_all, kernel_size=1, bias=False) + + def forward(self, x): + batch_size = x.size(0) + num_points = x.size(2) + + x0 = get_graph_feature(x, k=self.k) # (batch_size, 3, num_points) -> (batch_size, 3*2, num_points, k) + t = self.transform_net(x0) # (batch_size, 3, 3) + x = x.transpose(2, 1) # (batch_size, 3, num_points) -> (batch_size, num_points, 3) + x = torch.bmm(x, t) # (batch_size, num_points, 3) * (batch_size, 3, 3) -> (batch_size, num_points, 3) + x = x.transpose(2, 1) # (batch_size, num_points, 3) 
-> (batch_size, 3, num_points) + + x = get_graph_feature(x, k=self.k) # (batch_size, 3, num_points) -> (batch_size, 3*2, num_points, k) + x = self.conv1(x) # (batch_size, 3*2, num_points, k) -> (batch_size, 64, num_points, k) + x = self.conv2(x) # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points, k) + x1 = x.max(dim=-1, keepdim=False)[0] # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points) + + x = get_graph_feature(x1, k=self.k) # (batch_size, 64, num_points) -> (batch_size, 64*2, num_points, k) + x = self.conv3(x) # (batch_size, 64*2, num_points, k) -> (batch_size, 64, num_points, k) + x = self.conv4(x) # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points, k) + x2 = x.max(dim=-1, keepdim=False)[0] # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points) + + x = get_graph_feature(x2, k=self.k) # (batch_size, 64, num_points) -> (batch_size, 64*2, num_points, k) + x = self.conv5(x) # (batch_size, 64*2, num_points, k) -> (batch_size, 64, num_points, k) + x3 = x.max(dim=-1, keepdim=False)[0] # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points) + + x = torch.cat((x1, x2, x3), dim=1) # (batch_size, 64*3, num_points) + + x = self.conv6(x) # (batch_size, 64*3, num_points) -> (batch_size, emb_dims, num_points) + x = x.max(dim=-1, keepdim=True)[0] # (batch_size, emb_dims, num_points) -> (batch_size, emb_dims, 1) + + # l = l.view(batch_size, -1, 1) # (batch_size, num_categoties, 1) + # l = self.conv7(l) # (batch_size, num_categoties, 1) -> (batch_size, 64, 1) + # + # x = torch.cat((x, l), dim=1) # (batch_size, 1088, 1) + x = x.repeat(1, 1, num_points) # (batch_size, 1088, num_points) + + x = torch.cat((x, x1, x2, x3), dim=1) # (batch_size, 1024+64*3, num_points) + + x = self.conv8(x) # (batch_size, 1024+64*3, num_points) -> (batch_size, 256, num_points) + x = self.dp1(x) + x = self.conv9(x) # (batch_size, 256, num_points) -> (batch_size, 256, num_points) + x = self.dp2(x) + x = self.conv10(x) # (batch_size, 256, 
num_points) -> (batch_size, 128, num_points) + x = self.conv11(x) # (batch_size, 256, num_points) -> (batch_size, seg_num_all, num_points) + + return x + diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/aggregated_metrics.npz b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/aggregated_metrics.npz new file mode 100644 index 0000000..d0f78b5 Binary files /dev/null and b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/aggregated_metrics.npz differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/evaluation.log b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/evaluation.log new file mode 100644 index 0000000..c686b00 --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/evaluation.log @@ -0,0 +1,6316 @@ +2025-07-05 14:08:10,495 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-05 14:08:10,495 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-05 14:10:06,895 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-05 14:10:06,903 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': 
True} +2025-07-05 14:10:40,818 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-05 14:10:40,823 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-05 14:11:07,822 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-05 14:11:07,822 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-05 14:11:08,062 - INFO - Using device: cuda:0 +2025-07-05 14:11:46,832 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-05 14:11:46,832 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-05 14:11:47,066 - INFO - Using device: cuda:0 +2025-07-05 14:11:47,066 - INFO - args: ['__class__', '__contains__', 
'__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_get_args', '_get_kwargs', 'cache_dir', 'dataset_path', 'dropout', 'emb_dims', 'exp_name', 'k', 'model_checkpoint', 'num_points', 'num_vis_samples', 'output_channels', 'sample_ids', 'seed', 'visualize'] +2025-07-05 14:41:33,838 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-05 14:41:33,839 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-05 14:41:34,090 - INFO - Using device: cuda:0 +2025-07-05 14:41:34,271 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-05 14:41:34,336 - INFO - 0, module.transform_net.bn1.weight +2025-07-05 14:41:34,336 - INFO - 1, module.transform_net.bn1.bias +2025-07-05 14:41:34,336 - INFO - 2, module.transform_net.bn1.running_mean +2025-07-05 14:41:34,336 - INFO - 3, module.transform_net.bn1.running_var +2025-07-05 14:41:34,336 - INFO - 4, module.transform_net.bn1.num_batches_tracked +2025-07-05 14:41:34,336 - INFO - 5, module.transform_net.bn2.weight +2025-07-05 14:41:34,336 - INFO - 6, module.transform_net.bn2.bias +2025-07-05 14:41:34,336 - INFO - 7, module.transform_net.bn2.running_mean +2025-07-05 14:41:34,336 - INFO - 8, module.transform_net.bn2.running_var +2025-07-05 14:41:34,336 - INFO - 9, 
module.transform_net.bn2.num_batches_tracked +2025-07-05 14:41:34,336 - INFO - 10, module.transform_net.bn3.weight +2025-07-05 14:41:34,336 - INFO - 11, module.transform_net.bn3.bias +2025-07-05 14:41:34,336 - INFO - 12, module.transform_net.bn3.running_mean +2025-07-05 14:41:34,336 - INFO - 13, module.transform_net.bn3.running_var +2025-07-05 14:41:34,336 - INFO - 14, module.transform_net.bn3.num_batches_tracked +2025-07-05 14:41:34,336 - INFO - 15, module.transform_net.conv1.0.weight +2025-07-05 14:41:34,336 - INFO - 16, module.transform_net.conv1.1.weight +2025-07-05 14:41:34,336 - INFO - 17, module.transform_net.conv1.1.bias +2025-07-05 14:41:34,336 - INFO - 18, module.transform_net.conv1.1.running_mean +2025-07-05 14:41:34,336 - INFO - 19, module.transform_net.conv1.1.running_var +2025-07-05 14:41:34,336 - INFO - 20, module.transform_net.conv1.1.num_batches_tracked +2025-07-05 14:41:34,336 - INFO - 21, module.transform_net.conv2.0.weight +2025-07-05 14:41:34,336 - INFO - 22, module.transform_net.conv2.1.weight +2025-07-05 14:41:34,336 - INFO - 23, module.transform_net.conv2.1.bias +2025-07-05 14:41:34,336 - INFO - 24, module.transform_net.conv2.1.running_mean +2025-07-05 14:41:34,336 - INFO - 25, module.transform_net.conv2.1.running_var +2025-07-05 14:41:34,337 - INFO - 26, module.transform_net.conv2.1.num_batches_tracked +2025-07-05 14:41:34,337 - INFO - 27, module.transform_net.conv3.0.weight +2025-07-05 14:41:34,337 - INFO - 28, module.transform_net.conv3.1.weight +2025-07-05 14:41:34,337 - INFO - 29, module.transform_net.conv3.1.bias +2025-07-05 14:41:34,337 - INFO - 30, module.transform_net.conv3.1.running_mean +2025-07-05 14:41:34,337 - INFO - 31, module.transform_net.conv3.1.running_var +2025-07-05 14:41:34,339 - INFO - 32, module.transform_net.conv3.1.num_batches_tracked +2025-07-05 14:41:34,339 - INFO - 33, module.transform_net.linear1.weight +2025-07-05 14:41:34,340 - INFO - 34, module.transform_net.bn4.weight +2025-07-05 14:41:34,340 - INFO - 35, 
module.transform_net.bn4.bias +2025-07-05 14:41:34,340 - INFO - 36, module.transform_net.bn4.running_mean +2025-07-05 14:41:34,340 - INFO - 37, module.transform_net.bn4.running_var +2025-07-05 14:41:34,340 - INFO - 38, module.transform_net.bn4.num_batches_tracked +2025-07-05 14:41:34,340 - INFO - 39, module.transform_net.linear2.weight +2025-07-05 14:41:34,340 - INFO - 40, module.transform_net.bn5.weight +2025-07-05 14:41:34,340 - INFO - 41, module.transform_net.bn5.bias +2025-07-05 14:41:34,340 - INFO - 42, module.transform_net.bn5.running_mean +2025-07-05 14:41:34,340 - INFO - 43, module.transform_net.bn5.running_var +2025-07-05 14:41:34,340 - INFO - 44, module.transform_net.bn5.num_batches_tracked +2025-07-05 14:41:34,340 - INFO - 45, module.transform_net.transform.weight +2025-07-05 14:41:34,340 - INFO - 46, module.transform_net.transform.bias +2025-07-05 14:41:34,340 - INFO - 47, module.bn1.weight +2025-07-05 14:41:34,340 - INFO - 48, module.bn1.bias +2025-07-05 14:41:34,340 - INFO - 49, module.bn1.running_mean +2025-07-05 14:41:34,340 - INFO - 50, module.bn1.running_var +2025-07-05 14:41:34,340 - INFO - 51, module.bn1.num_batches_tracked +2025-07-05 14:41:34,340 - INFO - 52, module.bn2.weight +2025-07-05 14:41:34,340 - INFO - 53, module.bn2.bias +2025-07-05 14:41:34,340 - INFO - 54, module.bn2.running_mean +2025-07-05 14:41:34,340 - INFO - 55, module.bn2.running_var +2025-07-05 14:41:34,340 - INFO - 56, module.bn2.num_batches_tracked +2025-07-05 14:41:34,340 - INFO - 57, module.bn3.weight +2025-07-05 14:41:34,340 - INFO - 58, module.bn3.bias +2025-07-05 14:41:34,340 - INFO - 59, module.bn3.running_mean +2025-07-05 14:41:34,340 - INFO - 60, module.bn3.running_var +2025-07-05 14:41:34,341 - INFO - 61, module.bn3.num_batches_tracked +2025-07-05 14:41:34,341 - INFO - 62, module.bn4.weight +2025-07-05 14:41:34,341 - INFO - 63, module.bn4.bias +2025-07-05 14:41:34,341 - INFO - 64, module.bn4.running_mean +2025-07-05 14:41:34,341 - INFO - 65, module.bn4.running_var 
+2025-07-05 14:41:34,341 - INFO - 66, module.bn4.num_batches_tracked +2025-07-05 14:41:34,341 - INFO - 67, module.bn5.weight +2025-07-05 14:41:34,341 - INFO - 68, module.bn5.bias +2025-07-05 14:41:34,341 - INFO - 69, module.bn5.running_mean +2025-07-05 14:41:34,341 - INFO - 70, module.bn5.running_var +2025-07-05 14:41:34,341 - INFO - 71, module.bn5.num_batches_tracked +2025-07-05 14:41:34,341 - INFO - 72, module.bn6.weight +2025-07-05 14:41:34,341 - INFO - 73, module.bn6.bias +2025-07-05 14:41:34,341 - INFO - 74, module.bn6.running_mean +2025-07-05 14:41:34,341 - INFO - 75, module.bn6.running_var +2025-07-05 14:41:34,341 - INFO - 76, module.bn6.num_batches_tracked +2025-07-05 14:41:34,341 - INFO - 77, module.bn7.weight +2025-07-05 14:41:34,341 - INFO - 78, module.bn7.bias +2025-07-05 14:41:34,341 - INFO - 79, module.bn7.running_mean +2025-07-05 14:41:34,341 - INFO - 80, module.bn7.running_var +2025-07-05 14:41:34,341 - INFO - 81, module.bn7.num_batches_tracked +2025-07-05 14:41:34,341 - INFO - 82, module.bn8.weight +2025-07-05 14:41:34,341 - INFO - 83, module.bn8.bias +2025-07-05 14:41:34,341 - INFO - 84, module.bn8.running_mean +2025-07-05 14:41:34,341 - INFO - 85, module.bn8.running_var +2025-07-05 14:41:34,341 - INFO - 86, module.bn8.num_batches_tracked +2025-07-05 14:41:34,341 - INFO - 87, module.bn9.weight +2025-07-05 14:41:34,341 - INFO - 88, module.bn9.bias +2025-07-05 14:41:34,341 - INFO - 89, module.bn9.running_mean +2025-07-05 14:41:34,341 - INFO - 90, module.bn9.running_var +2025-07-05 14:41:34,341 - INFO - 91, module.bn9.num_batches_tracked +2025-07-05 14:41:34,341 - INFO - 92, module.bn10.weight +2025-07-05 14:41:34,341 - INFO - 93, module.bn10.bias +2025-07-05 14:41:34,341 - INFO - 94, module.bn10.running_mean +2025-07-05 14:41:34,341 - INFO - 95, module.bn10.running_var +2025-07-05 14:41:34,342 - INFO - 96, module.bn10.num_batches_tracked +2025-07-05 14:41:34,342 - INFO - 97, module.conv1.0.weight +2025-07-05 14:41:34,342 - INFO - 98, 
module.conv1.1.weight +2025-07-05 14:41:34,342 - INFO - 99, module.conv1.1.bias +2025-07-05 14:41:34,342 - INFO - 100, module.conv1.1.running_mean +2025-07-05 14:41:34,342 - INFO - 101, module.conv1.1.running_var +2025-07-05 14:41:34,342 - INFO - 102, module.conv1.1.num_batches_tracked +2025-07-05 14:41:34,342 - INFO - 103, module.conv2.0.weight +2025-07-05 14:41:34,342 - INFO - 104, module.conv2.1.weight +2025-07-05 14:41:34,342 - INFO - 105, module.conv2.1.bias +2025-07-05 14:41:34,342 - INFO - 106, module.conv2.1.running_mean +2025-07-05 14:41:34,342 - INFO - 107, module.conv2.1.running_var +2025-07-05 14:41:34,342 - INFO - 108, module.conv2.1.num_batches_tracked +2025-07-05 14:41:34,342 - INFO - 109, module.conv3.0.weight +2025-07-05 14:41:34,342 - INFO - 110, module.conv3.1.weight +2025-07-05 14:41:34,342 - INFO - 111, module.conv3.1.bias +2025-07-05 14:41:34,342 - INFO - 112, module.conv3.1.running_mean +2025-07-05 14:41:34,342 - INFO - 113, module.conv3.1.running_var +2025-07-05 14:41:34,342 - INFO - 114, module.conv3.1.num_batches_tracked +2025-07-05 14:41:34,342 - INFO - 115, module.conv4.0.weight +2025-07-05 14:41:34,342 - INFO - 116, module.conv4.1.weight +2025-07-05 14:41:34,342 - INFO - 117, module.conv4.1.bias +2025-07-05 14:41:34,342 - INFO - 118, module.conv4.1.running_mean +2025-07-05 14:41:34,342 - INFO - 119, module.conv4.1.running_var +2025-07-05 14:41:34,342 - INFO - 120, module.conv4.1.num_batches_tracked +2025-07-05 14:41:34,342 - INFO - 121, module.conv5.0.weight +2025-07-05 14:41:34,342 - INFO - 122, module.conv5.1.weight +2025-07-05 14:41:34,342 - INFO - 123, module.conv5.1.bias +2025-07-05 14:41:34,342 - INFO - 124, module.conv5.1.running_mean +2025-07-05 14:41:34,342 - INFO - 125, module.conv5.1.running_var +2025-07-05 14:41:34,342 - INFO - 126, module.conv5.1.num_batches_tracked +2025-07-05 14:41:34,342 - INFO - 127, module.conv6.0.weight +2025-07-05 14:41:34,342 - INFO - 128, module.conv6.1.weight +2025-07-05 14:41:34,342 - INFO - 129, 
module.conv6.1.bias +2025-07-05 14:41:34,342 - INFO - 130, module.conv6.1.running_mean +2025-07-05 14:41:34,343 - INFO - 131, module.conv6.1.running_var +2025-07-05 14:41:34,343 - INFO - 132, module.conv6.1.num_batches_tracked +2025-07-05 14:41:34,343 - INFO - 133, module.conv7.0.weight +2025-07-05 14:41:34,343 - INFO - 134, module.conv7.1.weight +2025-07-05 14:41:34,343 - INFO - 135, module.conv7.1.bias +2025-07-05 14:41:34,343 - INFO - 136, module.conv7.1.running_mean +2025-07-05 14:41:34,343 - INFO - 137, module.conv7.1.running_var +2025-07-05 14:41:34,343 - INFO - 138, module.conv7.1.num_batches_tracked +2025-07-05 14:41:34,343 - INFO - 139, module.conv8.0.weight +2025-07-05 14:41:34,343 - INFO - 140, module.conv8.1.weight +2025-07-05 14:41:34,343 - INFO - 141, module.conv8.1.bias +2025-07-05 14:41:34,343 - INFO - 142, module.conv8.1.running_mean +2025-07-05 14:41:34,343 - INFO - 143, module.conv8.1.running_var +2025-07-05 14:41:34,343 - INFO - 144, module.conv8.1.num_batches_tracked +2025-07-05 14:41:34,343 - INFO - 145, module.conv9.0.weight +2025-07-05 14:41:34,343 - INFO - 146, module.conv9.1.weight +2025-07-05 14:41:34,343 - INFO - 147, module.conv9.1.bias +2025-07-05 14:41:34,343 - INFO - 148, module.conv9.1.running_mean +2025-07-05 14:41:34,343 - INFO - 149, module.conv9.1.running_var +2025-07-05 14:41:34,343 - INFO - 150, module.conv9.1.num_batches_tracked +2025-07-05 14:41:34,343 - INFO - 151, module.conv10.0.weight +2025-07-05 14:41:34,343 - INFO - 152, module.conv10.1.weight +2025-07-05 14:41:34,343 - INFO - 153, module.conv10.1.bias +2025-07-05 14:41:34,343 - INFO - 154, module.conv10.1.running_mean +2025-07-05 14:41:34,343 - INFO - 155, module.conv10.1.running_var +2025-07-05 14:41:34,343 - INFO - 156, module.conv10.1.num_batches_tracked +2025-07-05 14:41:34,343 - INFO - 157, module.conv11.weight +2025-07-05 14:47:03,873 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-05 14:47:03,874 - INFO - Arguments: +{ 
'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-05 14:47:04,099 - INFO - Using device: cuda:0 +2025-07-05 14:47:04,273 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-05 14:47:04,334 - INFO - ['module.transform_net.bn1.weight', 'module.transform_net.bn1.bias', 'module.transform_net.bn1.running_mean', 'module.transform_net.bn1.running_var', 'module.transform_net.bn1.num_batches_tracked', 'module.transform_net.bn2.weight', 'module.transform_net.bn2.bias', 'module.transform_net.bn2.running_mean', 'module.transform_net.bn2.running_var', 'module.transform_net.bn2.num_batches_tracked', 'module.transform_net.bn3.weight', 'module.transform_net.bn3.bias', 'module.transform_net.bn3.running_mean', 'module.transform_net.bn3.running_var', 'module.transform_net.bn3.num_batches_tracked', 'module.transform_net.conv1.0.weight', 'module.transform_net.conv1.1.weight', 'module.transform_net.conv1.1.bias', 'module.transform_net.conv1.1.running_mean', 'module.transform_net.conv1.1.running_var', 'module.transform_net.conv1.1.num_batches_tracked', 'module.transform_net.conv2.0.weight', 'module.transform_net.conv2.1.weight', 'module.transform_net.conv2.1.bias', 'module.transform_net.conv2.1.running_mean', 'module.transform_net.conv2.1.running_var', 'module.transform_net.conv2.1.num_batches_tracked', 'module.transform_net.conv3.0.weight', 'module.transform_net.conv3.1.weight', 'module.transform_net.conv3.1.bias', 'module.transform_net.conv3.1.running_mean', 'module.transform_net.conv3.1.running_var', 'module.transform_net.conv3.1.num_batches_tracked', 
'module.transform_net.linear1.weight', 'module.transform_net.bn4.weight', 'module.transform_net.bn4.bias', 'module.transform_net.bn4.running_mean', 'module.transform_net.bn4.running_var', 'module.transform_net.bn4.num_batches_tracked', 'module.transform_net.linear2.weight', 'module.transform_net.bn5.weight', 'module.transform_net.bn5.bias', 'module.transform_net.bn5.running_mean', 'module.transform_net.bn5.running_var', 'module.transform_net.bn5.num_batches_tracked', 'module.transform_net.transform.weight', 'module.transform_net.transform.bias', 'module.bn1.weight', 'module.bn1.bias', 'module.bn1.running_mean', 'module.bn1.running_var', 'module.bn1.num_batches_tracked', 'module.bn2.weight', 'module.bn2.bias', 'module.bn2.running_mean', 'module.bn2.running_var', 'module.bn2.num_batches_tracked', 'module.bn3.weight', 'module.bn3.bias', 'module.bn3.running_mean', 'module.bn3.running_var', 'module.bn3.num_batches_tracked', 'module.bn4.weight', 'module.bn4.bias', 'module.bn4.running_mean', 'module.bn4.running_var', 'module.bn4.num_batches_tracked', 'module.bn5.weight', 'module.bn5.bias', 'module.bn5.running_mean', 'module.bn5.running_var', 'module.bn5.num_batches_tracked', 'module.bn6.weight', 'module.bn6.bias', 'module.bn6.running_mean', 'module.bn6.running_var', 'module.bn6.num_batches_tracked', 'module.bn7.weight', 'module.bn7.bias', 'module.bn7.running_mean', 'module.bn7.running_var', 'module.bn7.num_batches_tracked', 'module.bn8.weight', 'module.bn8.bias', 'module.bn8.running_mean', 'module.bn8.running_var', 'module.bn8.num_batches_tracked', 'module.bn9.weight', 'module.bn9.bias', 'module.bn9.running_mean', 'module.bn9.running_var', 'module.bn9.num_batches_tracked', 'module.bn10.weight', 'module.bn10.bias', 'module.bn10.running_mean', 'module.bn10.running_var', 'module.bn10.num_batches_tracked', 'module.conv1.0.weight', 'module.conv1.1.weight', 'module.conv1.1.bias', 'module.conv1.1.running_mean', 'module.conv1.1.running_var', 'module.conv1.1.num_batches_tracked', 
'module.conv2.0.weight', 'module.conv2.1.weight', 'module.conv2.1.bias', 'module.conv2.1.running_mean', 'module.conv2.1.running_var', 'module.conv2.1.num_batches_tracked', 'module.conv3.0.weight', 'module.conv3.1.weight', 'module.conv3.1.bias', 'module.conv3.1.running_mean', 'module.conv3.1.running_var', 'module.conv3.1.num_batches_tracked', 'module.conv4.0.weight', 'module.conv4.1.weight', 'module.conv4.1.bias', 'module.conv4.1.running_mean', 'module.conv4.1.running_var', 'module.conv4.1.num_batches_tracked', 'module.conv5.0.weight', 'module.conv5.1.weight', 'module.conv5.1.bias', 'module.conv5.1.running_mean', 'module.conv5.1.running_var', 'module.conv5.1.num_batches_tracked', 'module.conv6.0.weight', 'module.conv6.1.weight', 'module.conv6.1.bias', 'module.conv6.1.running_mean', 'module.conv6.1.running_var', 'module.conv6.1.num_batches_tracked', 'module.conv7.0.weight', 'module.conv7.1.weight', 'module.conv7.1.bias', 'module.conv7.1.running_mean', 'module.conv7.1.running_var', 'module.conv7.1.num_batches_tracked', 'module.conv8.0.weight', 'module.conv8.1.weight', 'module.conv8.1.bias', 'module.conv8.1.running_mean', 'module.conv8.1.running_var', 'module.conv8.1.num_batches_tracked', 'module.conv9.0.weight', 'module.conv9.1.weight', 'module.conv9.1.bias', 'module.conv9.1.running_mean', 'module.conv9.1.running_var', 'module.conv9.1.num_batches_tracked', 'module.conv10.0.weight', 'module.conv10.1.weight', 'module.conv10.1.bias', 'module.conv10.1.running_mean', 'module.conv10.1.running_var', 'module.conv10.1.num_batches_tracked', 'module.conv11.weight'] +2025-07-05 14:47:04,339 - INFO - Evaluation Results: +2025-07-05 14:47:48,810 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-05 14:47:48,811 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 
1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-05 14:47:49,044 - INFO - Using device: cuda:0 +2025-07-05 14:47:49,217 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-05 14:47:49,279 - INFO - module.transform_net.bn1.weight +2025-07-05 14:47:49,279 - INFO - Evaluation Results: +2025-07-05 15:01:53,850 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-05 15:01:53,856 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-05 15:01:54,112 - INFO - Using device: cuda:0 +2025-07-05 15:01:54,285 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-05 15:02:25,823 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-05 15:02:25,828 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-05 15:02:26,064 - INFO - Using device: cuda:0 +2025-07-05 15:02:26,238 - INFO - Loading model form experiments/Train_Test/best_model_pth 
+2025-07-05 15:05:45,188 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-05 15:05:45,196 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-05 15:05:45,565 - INFO - Using device: cuda:0 +2025-07-05 15:05:45,754 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-05 15:05:45,819 - INFO - Evaluation Results: +2025-07-05 15:52:21,318 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-05 15:52:21,318 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-05 15:52:21,556 - INFO - Using device: cuda:0 +2025-07-05 15:52:21,728 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-05 15:52:21,790 - INFO - ******************** +2025-07-07 09:44:49,559 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 09:44:49,564 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 
'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 09:44:49,941 - INFO - Using device: cuda:0 +2025-07-07 09:44:50,148 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 09:44:50,680 - INFO - value of state_dict: OrderedDict([('module.transform_net.bn1.weight', tensor([0.9275, 0.9019, 0.8220, 0.8269, 0.9125, 0.8320, 0.9235, 0.9176, 0.8651, + 0.8926, 0.9366, 0.8575, 0.9010, 0.8116, 0.8098, 0.8780, 0.7474, 0.7707, + 0.7645, 0.7270, 0.8303, 0.8718, 0.9079, 0.9308, 0.8282, 0.8368, 0.8314, + 0.8943, 0.8155, 0.8723, 0.9216, 0.8665, 0.8329, 0.9230, 0.9218, 0.7246, + 0.8695, 0.7112, 0.9202, 0.9024, 0.8554, 0.8208, 0.9062, 0.8142, 0.8495, + 0.8911, 0.9491, 0.8981, 0.9079, 0.8774, 0.7910, 0.8610, 0.8535, 0.8589, + 0.8606, 0.7686, 0.8902, 0.8865, 0.9017, 0.8344, 0.9526, 0.8976, 0.8785, + 0.9070], device='cuda:0')), ('module.transform_net.bn1.bias', tensor([-1.0562e-03, -3.7226e-03, 1.3831e-02, -3.8120e-03, 3.0793e-03, + 1.0190e-02, 7.1820e-03, 7.2552e-04, -4.4474e-03, -7.9938e-03, + 1.2937e-03, -1.3946e-03, -1.0807e-03, 6.0967e-03, -8.0081e-03, + -4.4927e-03, 1.7978e-03, -1.2391e-04, 2.1957e-03, -2.4691e-05, + 6.0693e-04, -4.5748e-03, 8.0330e-03, 5.5135e-03, -1.4094e-03, + 2.8556e-03, 6.7849e-04, 1.7594e-03, -3.8234e-03, 3.9159e-03, + 1.3642e-04, -9.6159e-03, 1.3848e-02, 5.8601e-03, 1.3965e-04, + 6.9075e-03, 5.5822e-03, 2.2187e-03, 2.3064e-03, 2.9992e-03, + 7.1861e-04, 1.0549e-03, -2.5443e-03, 3.8391e-04, -1.1725e-02, + -1.1457e-04, 5.8880e-03, -4.9409e-03, 2.7778e-03, -5.6326e-03, + -3.5553e-04, -7.4553e-03, -5.7946e-03, -1.0749e-02, -5.5001e-05, + -2.1818e-03, -6.0871e-03, -9.2138e-03, 1.6463e-02, 1.0079e-02, + 5.0877e-03, -1.1799e-02, 1.2412e-02, -5.6131e-03], device='cuda:0')), ('module.transform_net.bn1.running_mean', tensor([ 0.3973, -0.0452, -0.0407, -0.3346, 0.4613, 
-0.5564, -0.3521, 0.3470, + -0.3187, -0.3186, 0.4428, -0.4471, 0.2529, -0.4050, 0.5302, 0.0497, + -0.2089, -0.4787, 0.2607, 0.2067, -0.4967, 0.4931, 0.4631, 0.0127, + 0.3378, -0.1750, -0.0887, 0.0801, 0.2989, 0.2025, 0.0822, -0.3575, + -0.2386, -0.4870, 0.0526, 0.5788, 0.5827, -0.2510, -0.4966, 0.2228, + -0.0166, 0.3712, -0.1541, 0.6225, -0.5488, 0.3536, 0.0975, -0.1348, + -0.2541, -0.1311, -0.2351, 0.3539, -0.4166, -0.1345, 0.0857, -0.3491, + -0.0893, -0.2378, 0.0585, 0.1490, -0.0771, -0.0489, 0.3106, -0.5010], + device='cuda:0')), ('module.transform_net.bn1.running_var', tensor([0.1329, 0.0087, 0.0034, 0.0643, 0.1695, 0.2526, 0.1190, 0.0889, 0.0814, + 0.1511, 0.2181, 0.1649, 0.0331, 0.1231, 0.1643, 0.0489, 0.0811, 0.2653, + 0.2039, 0.1494, 0.1558, 0.1964, 0.1773, 0.0314, 0.0686, 0.0659, 0.0669, + 0.0241, 0.0587, 0.0474, 0.0538, 0.0813, 0.1449, 0.1895, 0.0075, 0.2191, + 0.2042, 0.1548, 0.2469, 0.1477, 0.0173, 0.2106, 0.0942, 0.2066, 0.2248, + 0.1726, 0.0064, 0.1339, 0.1446, 0.0261, 0.0268, 0.1454, 0.2668, 0.0080, + 0.0486, 0.0758, 0.0262, 0.0326, 0.0279, 0.0387, 0.0181, 0.0711, 0.0417, + 0.2074], device='cuda:0')), ('module.transform_net.bn1.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.transform_net.bn2.weight', tensor([0.7888, 0.8137, 0.5273, 0.8139, 0.5626, 0.5095, 0.6382, 0.7313, 0.6782, + 0.5885, 0.7845, 0.7150, 0.8483, 0.7457, 0.8340, 0.8451, 0.5911, 0.7267, + 0.6653, 0.7064, 0.8227, 0.8207, 0.6733, 0.7516, 0.7975, 0.8052, 0.5938, + 0.5939, 0.6069, 0.5751, 0.7120, 0.8550, 0.7510, 0.8435, 0.5960, 0.7657, + 0.6822, 0.7027, 0.7741, 0.8267, 0.6078, 0.8552, 0.8364, 0.8056, 0.8854, + 0.8158, 0.5502, 0.8215, 0.8333, 0.6022, 0.8733, 0.8029, 0.7775, 0.6513, + 0.5536, 0.7592, 0.5742, 0.6606, 0.7753, 0.8704, 0.8858, 0.5870, 0.5629, + 0.7906, 0.7605, 0.8450, 0.8297, 0.6893, 0.7834, 0.7048, 0.8268, 0.6847, + 0.8599, 0.8613, 0.7632, 0.8390, 0.7270, 0.8333, 0.7941, 0.5636, 0.7375, + 0.7993, 0.8546, 0.8105, 0.6966, 0.8224, 0.6365, 0.7047, 0.6616, 0.7402, + 
0.8789, 0.8059, 0.8140, 0.8351, 0.8481, 0.7621, 0.5946, 0.7633, 0.6194, + 0.7496, 0.8560, 0.8107, 0.7495, 0.6353, 0.8357, 0.7569, 0.7304, 0.7947, + 0.8434, 0.8461, 0.8376, 0.5731, 0.4576, 0.6504, 0.6630, 0.6213, 0.6924, + 0.7659, 0.5234, 0.7876, 0.7163, 0.7625, 0.8533, 0.7532, 0.8173, 0.8940, + 0.7231, 0.6060], device='cuda:0')), ('module.transform_net.bn2.bias', tensor([-0.0102, 0.0015, 0.0055, -0.0005, 0.0139, 0.0031, 0.0024, -0.0054, + 0.0012, 0.0029, 0.0134, 0.0030, -0.0048, 0.0108, 0.0021, 0.0118, + 0.0087, -0.0041, 0.0034, 0.0047, 0.0033, 0.0022, -0.0107, -0.0098, + -0.0087, 0.0010, -0.0019, 0.0048, 0.0004, -0.0048, -0.0060, 0.0035, + -0.0024, -0.0005, 0.0038, 0.0105, 0.0049, 0.0067, -0.0092, -0.0050, + 0.0029, -0.0039, -0.0065, 0.0015, 0.0032, 0.0035, 0.0016, 0.0013, + 0.0111, 0.0101, 0.0063, -0.0004, -0.0065, -0.0064, -0.0074, 0.0099, + -0.0055, -0.0015, 0.0074, 0.0019, 0.0037, 0.0021, 0.0021, -0.0023, + 0.0077, 0.0140, -0.0130, 0.0018, -0.0058, 0.0021, -0.0004, 0.0016, + -0.0060, 0.0104, -0.0078, -0.0001, 0.0033, -0.0175, -0.0010, -0.0009, + 0.0031, 0.0111, -0.0018, 0.0062, 0.0026, -0.0070, -0.0008, 0.0101, + -0.0052, -0.0061, 0.0010, -0.0018, 0.0047, 0.0016, 0.0026, 0.0050, + 0.0024, 0.0053, -0.0079, 0.0126, 0.0055, -0.0023, 0.0068, -0.0003, + -0.0023, 0.0077, -0.0013, 0.0020, 0.0099, 0.0051, 0.0119, -0.0059, + 0.0054, -0.0005, -0.0069, -0.0083, 0.0065, -0.0002, -0.0018, 0.0054, + -0.0086, 0.0031, -0.0017, -0.0084, 0.0010, 0.0027, -0.0035, 0.0087], + device='cuda:0')), ('module.transform_net.bn2.running_mean', tensor([ 0.1365, -0.0587, -0.3559, 0.3706, -0.2779, 0.1596, 0.0730, -0.2106, + 0.1720, -0.1327, -0.0405, 0.0453, 0.1111, -0.0137, -0.0300, -0.0234, + -0.0106, -0.0650, 0.1262, -0.0045, -0.1445, 0.0743, 0.0540, -0.0727, + -0.2320, -0.1412, 0.0902, 0.0538, -0.0704, -0.0558, 0.0812, -0.0222, + -0.2435, -0.2090, -0.0739, 0.1089, 0.1514, -0.0790, -0.1458, 0.1141, + -0.4097, -0.0055, -0.0653, 0.0355, -0.0485, -0.1345, -0.1108, 0.0902, + -0.1913, -0.2523, 
-0.0691, 0.1981, -0.0117, -0.2619, -0.1539, 0.1080, + -0.1877, 0.1062, 0.0805, -0.1370, -0.2152, -0.4199, 0.1185, -0.1104, + -0.1510, 0.1458, 0.3430, 0.0074, 0.1497, 0.1488, 0.0536, -0.2573, + 0.2205, 0.2069, -0.0927, 0.0433, -0.0235, -0.0520, 0.3270, 0.1209, + -0.1158, -0.0276, 0.0728, 0.0320, 0.3710, -0.0110, -0.1335, -0.0924, + 0.0926, 0.0331, 0.0363, -0.0846, 0.1385, -0.0343, 0.0052, -0.3218, + 0.3276, 0.0118, -0.1096, 0.1240, -0.0398, -0.1811, 0.1365, -0.0184, + 0.1517, -0.0295, 0.0222, -0.0087, -0.2759, 0.2717, -0.1104, -0.1339, + -0.0727, -0.2747, 0.0832, -0.0224, -0.1548, -0.1435, -0.0871, -0.1645, + -0.1869, -0.2086, 0.1161, 0.0350, 0.0647, -0.1451, -0.1575, -0.0065], + device='cuda:0')), ('module.transform_net.bn2.running_var', tensor([0.0959, 0.0718, 0.2080, 0.1024, 0.0352, 0.0453, 0.1981, 0.0814, 0.1024, + 0.0652, 0.0216, 0.1924, 0.0618, 0.0574, 0.0325, 0.0293, 0.0499, 0.1066, + 0.0357, 0.1419, 0.0387, 0.0306, 0.1243, 0.0560, 0.0592, 0.0446, 0.0878, + 0.0505, 0.0473, 0.0563, 0.0301, 0.1178, 0.0243, 0.0250, 0.0566, 0.0323, + 0.1452, 0.0098, 0.0680, 0.0318, 0.1222, 0.0534, 0.1775, 0.1222, 0.0329, + 0.0978, 0.1551, 0.0562, 0.0584, 0.1119, 0.0866, 0.1366, 0.1661, 0.0329, + 0.0825, 0.0399, 0.0979, 0.0680, 0.0480, 0.0244, 0.0301, 0.0582, 0.1163, + 0.0143, 0.0866, 0.1150, 0.0794, 0.0644, 0.0577, 0.1003, 0.0355, 0.0350, + 0.0786, 0.1640, 0.0923, 0.0282, 0.1191, 0.0705, 0.1548, 0.0162, 0.0199, + 0.1170, 0.0764, 0.0434, 0.1522, 0.0371, 0.0423, 0.0877, 0.1015, 0.1570, + 0.0303, 0.0271, 0.1111, 0.0759, 0.0147, 0.0454, 0.1740, 0.0865, 0.0629, + 0.0438, 0.0298, 0.0975, 0.0630, 0.0254, 0.0656, 0.0329, 0.0712, 0.0164, + 0.0338, 0.1003, 0.0181, 0.0938, 0.0366, 0.1311, 0.0772, 0.0750, 0.0897, + 0.0565, 0.0184, 0.0206, 0.1012, 0.0356, 0.0436, 0.0812, 0.0089, 0.1295, + 0.0388, 0.0991], device='cuda:0')), ('module.transform_net.bn2.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.transform_net.bn3.weight', tensor([0.2260, 0.2437, 0.0726, ..., 0.1530, 0.0871, 
0.0971], device='cuda:0')), ('module.transform_net.bn3.bias', tensor([-1.3205e-06, -4.3629e-07, -1.4959e-07, ..., -2.8803e-07, + -4.2009e-07, -4.3902e-08], device='cuda:0')), ('module.transform_net.bn3.running_mean', tensor([ 0.1373, 0.0230, 0.0417, ..., 0.0454, 0.0990, -0.1563], + device='cuda:0')), ('module.transform_net.bn3.running_var', tensor([0.0370, 0.0143, 0.0088, ..., 0.0297, 0.0114, 0.0166], device='cuda:0')), ('module.transform_net.bn3.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.transform_net.conv1.0.weight', tensor([[[[ 0.1584]], + + [[-0.1549]], + + [[-0.0523]], + + [[ 0.1744]], + + [[-0.3707]], + + [[ 0.2332]]], + + + [[[-0.0770]], + + [[ 0.1956]], + + [[ 0.0510]], + + [[-0.0325]], + + [[ 0.1225]], + + [[ 0.0062]]], + + + [[[ 0.1460]], + + [[-0.1603]], + + [[-0.0398]], + + [[-0.0285]], + + [[ 0.0585]], + + [[ 0.0036]]], + + + [[[ 0.3053]], + + [[ 0.0798]], + + [[-0.0937]], + + [[-0.1668]], + + [[-0.0602]], + + [[-0.1441]]], + + + [[[-0.1131]], + + [[ 0.0222]], + + [[ 0.1807]], + + [[ 0.2170]], + + [[-0.3767]], + + [[ 0.2329]]], + + + [[[ 0.0593]], + + [[ 0.1863]], + + [[ 0.1875]], + + [[-0.3128]], + + [[-0.3324]], + + [[-0.1473]]], + + + [[[ 0.2831]], + + [[-0.0521]], + + [[ 0.1338]], + + [[-0.1695]], + + [[ 0.3511]], + + [[-0.1679]]], + + + [[[ 0.2436]], + + [[ 0.0139]], + + [[-0.1488]], + + [[ 0.1855]], + + [[-0.1863]], + + [[ 0.1172]]], + + + [[[-0.1115]], + + [[-0.0329]], + + [[-0.3367]], + + [[-0.1745]], + + [[ 0.1978]], + + [[-0.0971]]], + + + [[[ 0.2633]], + + [[ 0.2468]], + + [[-0.0049]], + + [[-0.2560]], + + [[ 0.2292]], + + [[ 0.1150]]], + + + [[[-0.2215]], + + [[ 0.1363]], + + [[ 0.1482]], + + [[ 0.3207]], + + [[-0.1897]], + + [[-0.0687]]], + + + [[[-0.0054]], + + [[ 0.0596]], + + [[-0.1252]], + + [[-0.2655]], + + [[ 0.2191]], + + [[-0.0821]]], + + + [[[ 0.1714]], + + [[ 0.0184]], + + [[ 0.1303]], + + [[ 0.0923]], + + [[ 0.1363]], + + [[ 0.1966]]], + + + [[[-0.3031]], + + [[ 0.1165]], + + [[-0.1593]], + + [[-0.2451]], + + 
[[ 0.0247]], + + [[-0.0624]]], + + + [[[ 0.1721]], + + [[-0.1726]], + + [[-0.2826]], + + [[ 0.2720]], + + [[-0.0630]], + + [[ 0.2087]]], + + + [[[ 0.1310]], + + [[-0.2788]], + + [[ 0.0703]], + + [[ 0.1030]], + + [[-0.2452]], + + [[-0.1816]]], + + + [[[ 0.1097]], + + [[ 0.0669]], + + [[-0.1137]], + + [[-0.1190]], + + [[ 0.3474]], + + [[-0.0518]]], + + + [[[ 0.1142]], + + [[ 0.1274]], + + [[-0.1817]], + + [[-0.3258]], + + [[ 0.3584]], + + [[ 0.0199]]], + + + [[[ 0.2142]], + + [[ 0.0704]], + + [[ 0.0866]], + + [[ 0.2694]], + + [[ 0.3781]], + + [[-0.2507]]], + + + [[[ 0.0536]], + + [[ 0.1922]], + + [[ 0.2312]], + + [[ 0.2425]], + + [[-0.2703]], + + [[-0.2730]]], + + + [[[-0.2144]], + + [[ 0.0288]], + + [[ 0.3136]], + + [[-0.2513]], + + [[ 0.1916]], + + [[-0.2050]]], + + + [[[-0.3253]], + + [[-0.0730]], + + [[ 0.1448]], + + [[ 0.2929]], + + [[-0.2139]], + + [[ 0.0903]]], + + + [[[ 0.2616]], + + [[ 0.2309]], + + [[ 0.1516]], + + [[ 0.2732]], + + [[ 0.2357]], + + [[ 0.0892]]], + + + [[[-0.2489]], + + [[-0.3232]], + + [[-0.1666]], + + [[-0.0482]], + + [[-0.2368]], + + [[ 0.1477]]], + + + [[[ 0.1132]], + + [[-0.0917]], + + [[ 0.1443]], + + [[ 0.1298]], + + [[ 0.2258]], + + [[ 0.2455]]], + + + [[[-0.1094]], + + [[ 0.2976]], + + [[-0.0425]], + + [[-0.0225]], + + [[-0.3587]], + + [[-0.2430]]], + + + [[[-0.2121]], + + [[-0.1325]], + + [[ 0.1739]], + + [[-0.1205]], + + [[-0.2954]], + + [[ 0.1604]]], + + + [[[ 0.1194]], + + [[ 0.2433]], + + [[-0.1884]], + + [[ 0.0874]], + + [[-0.1398]], + + [[-0.0885]]], + + + [[[-0.2934]], + + [[-0.1531]], + + [[-0.1193]], + + [[ 0.1621]], + + [[ 0.0649]], + + [[ 0.0947]]], + + + [[[ 0.3571]], + + [[ 0.3628]], + + [[ 0.2531]], + + [[ 0.0156]], + + [[-0.2695]], + + [[ 0.3095]]], + + + [[[-0.0749]], + + [[-0.0147]], + + [[-0.3069]], + + [[-0.0809]], + + [[-0.2517]], + + [[ 0.3526]]], + + + [[[-0.2889]], + + [[-0.2407]], + + [[-0.0032]], + + [[-0.1816]], + + [[ 0.1358]], + + [[-0.1456]]], + + + [[[-0.1604]], + + [[ 0.0194]], + + [[ 0.2341]], + + 
[[-0.2574]], + + [[ 0.1811]], + + [[ 0.2567]]], + + + [[[ 0.2139]], + + [[-0.1616]], + + [[ 0.0780]], + + [[-0.2749]], + + [[ 0.2682]], + + [[-0.1267]]], + + + [[[ 0.2452]], + + [[ 0.0107]], + + [[-0.0308]], + + [[-0.0328]], + + [[ 0.0593]], + + [[ 0.1760]]], + + + [[[ 0.0423]], + + [[ 0.0774]], + + [[-0.2462]], + + [[ 0.2698]], + + [[ 0.3394]], + + [[ 0.2977]]], + + + [[[ 0.1493]], + + [[-0.2377]], + + [[-0.0960]], + + [[ 0.2627]], + + [[-0.3031]], + + [[ 0.3239]]], + + + [[[-0.1967]], + + [[-0.2593]], + + [[ 0.1475]], + + [[-0.2768]], + + [[-0.0813]], + + [[ 0.2859]]], + + + [[[ 0.2451]], + + [[ 0.3295]], + + [[-0.0675]], + + [[-0.3027]], + + [[ 0.3746]], + + [[-0.0713]]], + + + [[[-0.1396]], + + [[ 0.0800]], + + [[-0.1713]], + + [[ 0.2528]], + + [[ 0.2200]], + + [[-0.2727]]], + + + [[[-0.3015]], + + [[-0.0354]], + + [[ 0.1652]], + + [[-0.0121]], + + [[-0.1955]], + + [[ 0.0030]]], + + + [[[-0.2930]], + + [[-0.2238]], + + [[-0.1634]], + + [[ 0.2905]], + + [[ 0.3230]], + + [[-0.1144]]], + + + [[[ 0.0183]], + + [[ 0.1670]], + + [[ 0.0457]], + + [[-0.1912]], + + [[ 0.2208]], + + [[ 0.2305]]], + + + [[[-0.0131]], + + [[-0.0302]], + + [[ 0.2207]], + + [[ 0.2814]], + + [[-0.2071]], + + [[ 0.3436]]], + + + [[[ 0.0789]], + + [[-0.1783]], + + [[ 0.1753]], + + [[-0.2881]], + + [[ 0.3232]], + + [[-0.1992]]], + + + [[[ 0.0559]], + + [[ 0.0945]], + + [[ 0.0636]], + + [[ 0.2896]], + + [[-0.1457]], + + [[-0.1419]]], + + + [[[-0.3397]], + + [[-0.0255]], + + [[-0.1840]], + + [[ 0.0125]], + + [[-0.0719]], + + [[ 0.1360]]], + + + [[[ 0.3084]], + + [[ 0.0748]], + + [[-0.0997]], + + [[-0.2070]], + + [[-0.3221]], + + [[ 0.3056]]], + + + [[[-0.0646]], + + [[ 0.0512]], + + [[ 0.0210]], + + [[-0.2516]], + + [[ 0.2191]], + + [[ 0.2148]]], + + + [[[ 0.0231]], + + [[-0.1911]], + + [[-0.1566]], + + [[-0.0022]], + + [[-0.2124]], + + [[-0.2201]]], + + + [[[-0.0506]], + + [[-0.1049]], + + [[ 0.0792]], + + [[-0.1008]], + + [[-0.0570]], + + [[-0.1439]]], + + + [[[-0.1747]], + + [[ 0.1981]], + + [[ 
0.0533]], + + [[ 0.2704]], + + [[-0.0670]], + + [[-0.0917]]], + + + [[[-0.1555]], + + [[-0.0829]], + + [[ 0.2084]], + + [[-0.3651]], + + [[-0.1302]], + + [[ 0.2298]]], + + + [[[ 0.2357]], + + [[ 0.1383]], + + [[-0.0769]], + + [[-0.0350]], + + [[-0.0601]], + + [[-0.1410]]], + + + [[[ 0.1519]], + + [[-0.1167]], + + [[-0.1945]], + + [[ 0.0901]], + + [[-0.2757]], + + [[-0.0857]]], + + + [[[ 0.0982]], + + [[ 0.0932]], + + [[ 0.2429]], + + [[-0.1877]], + + [[-0.0088]], + + [[-0.1150]]], + + + [[[-0.2128]], + + [[-0.1974]], + + [[ 0.1027]], + + [[ 0.0433]], + + [[-0.1795]], + + [[-0.2663]]], + + + [[[-0.3183]], + + [[ 0.3276]], + + [[-0.0553]], + + [[-0.0866]], + + [[-0.1459]], + + [[-0.1854]]], + + + [[[-0.3464]], + + [[-0.2226]], + + [[ 0.2287]], + + [[ 0.0934]], + + [[-0.1392]], + + [[-0.1416]]], + + + [[[ 0.2405]], + + [[ 0.2596]], + + [[ 0.3477]], + + [[ 0.1095]], + + [[ 0.1825]], + + [[-0.0276]]], + + + [[[-0.2307]], + + [[-0.3555]], + + [[ 0.2273]], + + [[-0.0555]], + + [[ 0.1575]], + + [[ 0.0107]]], + + + [[[-0.2692]], + + [[-0.1680]], + + [[ 0.0951]], + + [[-0.0811]], + + [[-0.3656]], + + [[ 0.1269]]], + + + [[[ 0.0135]], + + [[-0.1125]], + + [[ 0.1287]], + + [[ 0.0839]], + + [[-0.1510]], + + [[ 0.3184]]], + + + [[[ 0.1597]], + + [[ 0.0357]], + + [[-0.0480]], + + [[-0.2962]], + + [[ 0.2516]], + + [[-0.0955]]]], device='cuda:0')), ('module.transform_net.conv1.1.weight', tensor([0.9275, 0.9019, 0.8220, 0.8269, 0.9125, 0.8320, 0.9235, 0.9176, 0.8651, + 0.8926, 0.9366, 0.8575, 0.9010, 0.8116, 0.8098, 0.8780, 0.7474, 0.7707, + 0.7645, 0.7270, 0.8303, 0.8718, 0.9079, 0.9308, 0.8282, 0.8368, 0.8314, + 0.8943, 0.8155, 0.8723, 0.9216, 0.8665, 0.8329, 0.9230, 0.9218, 0.7246, + 0.8695, 0.7112, 0.9202, 0.9024, 0.8554, 0.8208, 0.9062, 0.8142, 0.8495, + 0.8911, 0.9491, 0.8981, 0.9079, 0.8774, 0.7910, 0.8610, 0.8535, 0.8589, + 0.8606, 0.7686, 0.8902, 0.8865, 0.9017, 0.8344, 0.9526, 0.8976, 0.8785, + 0.9070], device='cuda:0')), ('module.transform_net.conv1.1.bias', 
tensor([-1.0562e-03, -3.7226e-03, 1.3831e-02, -3.8120e-03, 3.0793e-03, + 1.0190e-02, 7.1820e-03, 7.2552e-04, -4.4474e-03, -7.9938e-03, + 1.2937e-03, -1.3946e-03, -1.0807e-03, 6.0967e-03, -8.0081e-03, + -4.4927e-03, 1.7978e-03, -1.2391e-04, 2.1957e-03, -2.4691e-05, + 6.0693e-04, -4.5748e-03, 8.0330e-03, 5.5135e-03, -1.4094e-03, + 2.8556e-03, 6.7849e-04, 1.7594e-03, -3.8234e-03, 3.9159e-03, + 1.3642e-04, -9.6159e-03, 1.3848e-02, 5.8601e-03, 1.3965e-04, + 6.9075e-03, 5.5822e-03, 2.2187e-03, 2.3064e-03, 2.9992e-03, + 7.1861e-04, 1.0549e-03, -2.5443e-03, 3.8391e-04, -1.1725e-02, + -1.1457e-04, 5.8880e-03, -4.9409e-03, 2.7778e-03, -5.6326e-03, + -3.5553e-04, -7.4553e-03, -5.7946e-03, -1.0749e-02, -5.5001e-05, + -2.1818e-03, -6.0871e-03, -9.2138e-03, 1.6463e-02, 1.0079e-02, + 5.0877e-03, -1.1799e-02, 1.2412e-02, -5.6131e-03], device='cuda:0')), ('module.transform_net.conv1.1.running_mean', tensor([ 0.3973, -0.0452, -0.0407, -0.3346, 0.4613, -0.5564, -0.3521, 0.3470, + -0.3187, -0.3186, 0.4428, -0.4471, 0.2529, -0.4050, 0.5302, 0.0497, + -0.2089, -0.4787, 0.2607, 0.2067, -0.4967, 0.4931, 0.4631, 0.0127, + 0.3378, -0.1750, -0.0887, 0.0801, 0.2989, 0.2025, 0.0822, -0.3575, + -0.2386, -0.4870, 0.0526, 0.5788, 0.5827, -0.2510, -0.4966, 0.2228, + -0.0166, 0.3712, -0.1541, 0.6225, -0.5488, 0.3536, 0.0975, -0.1348, + -0.2541, -0.1311, -0.2351, 0.3539, -0.4166, -0.1345, 0.0857, -0.3491, + -0.0893, -0.2378, 0.0585, 0.1490, -0.0771, -0.0489, 0.3106, -0.5010], + device='cuda:0')), ('module.transform_net.conv1.1.running_var', tensor([0.1329, 0.0087, 0.0034, 0.0643, 0.1695, 0.2526, 0.1190, 0.0889, 0.0814, + 0.1511, 0.2181, 0.1649, 0.0331, 0.1231, 0.1643, 0.0489, 0.0811, 0.2653, + 0.2039, 0.1494, 0.1558, 0.1964, 0.1773, 0.0314, 0.0686, 0.0659, 0.0669, + 0.0241, 0.0587, 0.0474, 0.0538, 0.0813, 0.1449, 0.1895, 0.0075, 0.2191, + 0.2042, 0.1548, 0.2469, 0.1477, 0.0173, 0.2106, 0.0942, 0.2066, 0.2248, + 0.1726, 0.0064, 0.1339, 0.1446, 0.0261, 0.0268, 0.1454, 0.2668, 0.0080, + 0.0486, 0.0758, 
0.0262, 0.0326, 0.0279, 0.0387, 0.0181, 0.0711, 0.0417, + 0.2074], device='cuda:0')), ('module.transform_net.conv1.1.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.transform_net.conv2.0.weight', tensor([[[[-0.0378]], + + [[ 0.0507]], + + [[ 0.0394]], + + ..., + + [[ 0.0499]], + + [[-0.0378]], + + [[ 0.0551]]], + + + [[[-0.0457]], + + [[-0.0944]], + + [[ 0.0374]], + + ..., + + [[-0.0103]], + + [[-0.0659]], + + [[ 0.0175]]], + + + [[[-0.0792]], + + [[ 0.0734]], + + [[ 0.0414]], + + ..., + + [[-0.0793]], + + [[-0.0287]], + + [[-0.0231]]], + + + ..., + + + [[[-0.0312]], + + [[ 0.0610]], + + [[ 0.0267]], + + ..., + + [[-0.0239]], + + [[ 0.0435]], + + [[ 0.0205]]], + + + [[[-0.1111]], + + [[-0.0923]], + + [[ 0.1025]], + + ..., + + [[ 0.1011]], + + [[-0.1051]], + + [[ 0.0101]]], + + + [[[ 0.0963]], + + [[-0.0522]], + + [[-0.0938]], + + ..., + + [[-0.0435]], + + [[ 0.0398]], + + [[-0.0113]]]], device='cuda:0')), ('module.transform_net.conv2.1.weight', tensor([0.7888, 0.8137, 0.5273, 0.8139, 0.5626, 0.5095, 0.6382, 0.7313, 0.6782, + 0.5885, 0.7845, 0.7150, 0.8483, 0.7457, 0.8340, 0.8451, 0.5911, 0.7267, + 0.6653, 0.7064, 0.8227, 0.8207, 0.6733, 0.7516, 0.7975, 0.8052, 0.5938, + 0.5939, 0.6069, 0.5751, 0.7120, 0.8550, 0.7510, 0.8435, 0.5960, 0.7657, + 0.6822, 0.7027, 0.7741, 0.8267, 0.6078, 0.8552, 0.8364, 0.8056, 0.8854, + 0.8158, 0.5502, 0.8215, 0.8333, 0.6022, 0.8733, 0.8029, 0.7775, 0.6513, + 0.5536, 0.7592, 0.5742, 0.6606, 0.7753, 0.8704, 0.8858, 0.5870, 0.5629, + 0.7906, 0.7605, 0.8450, 0.8297, 0.6893, 0.7834, 0.7048, 0.8268, 0.6847, + 0.8599, 0.8613, 0.7632, 0.8390, 0.7270, 0.8333, 0.7941, 0.5636, 0.7375, + 0.7993, 0.8546, 0.8105, 0.6966, 0.8224, 0.6365, 0.7047, 0.6616, 0.7402, + 0.8789, 0.8059, 0.8140, 0.8351, 0.8481, 0.7621, 0.5946, 0.7633, 0.6194, + 0.7496, 0.8560, 0.8107, 0.7495, 0.6353, 0.8357, 0.7569, 0.7304, 0.7947, + 0.8434, 0.8461, 0.8376, 0.5731, 0.4576, 0.6504, 0.6630, 0.6213, 0.6924, + 0.7659, 0.5234, 0.7876, 0.7163, 0.7625, 0.8533, 0.7532, 
0.8173, 0.8940, + 0.7231, 0.6060], device='cuda:0')), ('module.transform_net.conv2.1.bias', tensor([-0.0102, 0.0015, 0.0055, -0.0005, 0.0139, 0.0031, 0.0024, -0.0054, + 0.0012, 0.0029, 0.0134, 0.0030, -0.0048, 0.0108, 0.0021, 0.0118, + 0.0087, -0.0041, 0.0034, 0.0047, 0.0033, 0.0022, -0.0107, -0.0098, + -0.0087, 0.0010, -0.0019, 0.0048, 0.0004, -0.0048, -0.0060, 0.0035, + -0.0024, -0.0005, 0.0038, 0.0105, 0.0049, 0.0067, -0.0092, -0.0050, + 0.0029, -0.0039, -0.0065, 0.0015, 0.0032, 0.0035, 0.0016, 0.0013, + 0.0111, 0.0101, 0.0063, -0.0004, -0.0065, -0.0064, -0.0074, 0.0099, + -0.0055, -0.0015, 0.0074, 0.0019, 0.0037, 0.0021, 0.0021, -0.0023, + 0.0077, 0.0140, -0.0130, 0.0018, -0.0058, 0.0021, -0.0004, 0.0016, + -0.0060, 0.0104, -0.0078, -0.0001, 0.0033, -0.0175, -0.0010, -0.0009, + 0.0031, 0.0111, -0.0018, 0.0062, 0.0026, -0.0070, -0.0008, 0.0101, + -0.0052, -0.0061, 0.0010, -0.0018, 0.0047, 0.0016, 0.0026, 0.0050, + 0.0024, 0.0053, -0.0079, 0.0126, 0.0055, -0.0023, 0.0068, -0.0003, + -0.0023, 0.0077, -0.0013, 0.0020, 0.0099, 0.0051, 0.0119, -0.0059, + 0.0054, -0.0005, -0.0069, -0.0083, 0.0065, -0.0002, -0.0018, 0.0054, + -0.0086, 0.0031, -0.0017, -0.0084, 0.0010, 0.0027, -0.0035, 0.0087], + device='cuda:0')), ('module.transform_net.conv2.1.running_mean', tensor([ 0.1365, -0.0587, -0.3559, 0.3706, -0.2779, 0.1596, 0.0730, -0.2106, + 0.1720, -0.1327, -0.0405, 0.0453, 0.1111, -0.0137, -0.0300, -0.0234, + -0.0106, -0.0650, 0.1262, -0.0045, -0.1445, 0.0743, 0.0540, -0.0727, + -0.2320, -0.1412, 0.0902, 0.0538, -0.0704, -0.0558, 0.0812, -0.0222, + -0.2435, -0.2090, -0.0739, 0.1089, 0.1514, -0.0790, -0.1458, 0.1141, + -0.4097, -0.0055, -0.0653, 0.0355, -0.0485, -0.1345, -0.1108, 0.0902, + -0.1913, -0.2523, -0.0691, 0.1981, -0.0117, -0.2619, -0.1539, 0.1080, + -0.1877, 0.1062, 0.0805, -0.1370, -0.2152, -0.4199, 0.1185, -0.1104, + -0.1510, 0.1458, 0.3430, 0.0074, 0.1497, 0.1488, 0.0536, -0.2573, + 0.2205, 0.2069, -0.0927, 0.0433, -0.0235, -0.0520, 0.3270, 0.1209, + -0.1158, 
-0.0276, 0.0728, 0.0320, 0.3710, -0.0110, -0.1335, -0.0924, + 0.0926, 0.0331, 0.0363, -0.0846, 0.1385, -0.0343, 0.0052, -0.3218, + 0.3276, 0.0118, -0.1096, 0.1240, -0.0398, -0.1811, 0.1365, -0.0184, + 0.1517, -0.0295, 0.0222, -0.0087, -0.2759, 0.2717, -0.1104, -0.1339, + -0.0727, -0.2747, 0.0832, -0.0224, -0.1548, -0.1435, -0.0871, -0.1645, + -0.1869, -0.2086, 0.1161, 0.0350, 0.0647, -0.1451, -0.1575, -0.0065], + device='cuda:0')), ('module.transform_net.conv2.1.running_var', tensor([0.0959, 0.0718, 0.2080, 0.1024, 0.0352, 0.0453, 0.1981, 0.0814, 0.1024, + 0.0652, 0.0216, 0.1924, 0.0618, 0.0574, 0.0325, 0.0293, 0.0499, 0.1066, + 0.0357, 0.1419, 0.0387, 0.0306, 0.1243, 0.0560, 0.0592, 0.0446, 0.0878, + 0.0505, 0.0473, 0.0563, 0.0301, 0.1178, 0.0243, 0.0250, 0.0566, 0.0323, + 0.1452, 0.0098, 0.0680, 0.0318, 0.1222, 0.0534, 0.1775, 0.1222, 0.0329, + 0.0978, 0.1551, 0.0562, 0.0584, 0.1119, 0.0866, 0.1366, 0.1661, 0.0329, + 0.0825, 0.0399, 0.0979, 0.0680, 0.0480, 0.0244, 0.0301, 0.0582, 0.1163, + 0.0143, 0.0866, 0.1150, 0.0794, 0.0644, 0.0577, 0.1003, 0.0355, 0.0350, + 0.0786, 0.1640, 0.0923, 0.0282, 0.1191, 0.0705, 0.1548, 0.0162, 0.0199, + 0.1170, 0.0764, 0.0434, 0.1522, 0.0371, 0.0423, 0.0877, 0.1015, 0.1570, + 0.0303, 0.0271, 0.1111, 0.0759, 0.0147, 0.0454, 0.1740, 0.0865, 0.0629, + 0.0438, 0.0298, 0.0975, 0.0630, 0.0254, 0.0656, 0.0329, 0.0712, 0.0164, + 0.0338, 0.1003, 0.0181, 0.0938, 0.0366, 0.1311, 0.0772, 0.0750, 0.0897, + 0.0565, 0.0184, 0.0206, 0.1012, 0.0356, 0.0436, 0.0812, 0.0089, 0.1295, + 0.0388, 0.0991], device='cuda:0')), ('module.transform_net.conv2.1.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.transform_net.conv3.0.weight', tensor([[[ 3.1261e-03], + [-4.2086e-05], + [-6.5251e-04], + ..., + [-3.9203e-02], + [-4.1318e-02], + [ 1.2954e-02]], + + [[-5.2068e-03], + [-1.2664e-02], + [ 1.4388e-02], + ..., + [-2.2359e-02], + [ 1.1839e-02], + [-8.9220e-04]], + + [[ 2.0447e-03], + [ 2.5758e-04], + [ 3.0001e-04], + ..., + [ 1.4919e-02], + 
[-4.0045e-04], + [-4.6712e-06]], + + ..., + + [[-4.7179e-03], + [ 3.6316e-03], + [ 1.3634e-02], + ..., + [-6.7965e-03], + [-1.3159e-02], + [-2.9168e-03]], + + [[-7.4207e-04], + [-1.8946e-02], + [ 9.9497e-03], + ..., + [-3.3735e-03], + [ 1.1675e-02], + [-8.6774e-03]], + + [[-3.1118e-03], + [ 9.5883e-04], + [-1.1837e-02], + ..., + [-3.1981e-02], + [-1.3424e-02], + [ 1.2685e-02]]], device='cuda:0')), ('module.transform_net.conv3.1.weight', tensor([0.2260, 0.2437, 0.0726, ..., 0.1530, 0.0871, 0.0971], device='cuda:0')), ('module.transform_net.conv3.1.bias', tensor([-1.3205e-06, -4.3629e-07, -1.4959e-07, ..., -2.8803e-07, + -4.2009e-07, -4.3902e-08], device='cuda:0')), ('module.transform_net.conv3.1.running_mean', tensor([ 0.1373, 0.0230, 0.0417, ..., 0.0454, 0.0990, -0.1563], + device='cuda:0')), ('module.transform_net.conv3.1.running_var', tensor([0.0370, 0.0143, 0.0088, ..., 0.0297, 0.0114, 0.0166], device='cuda:0')), ('module.transform_net.conv3.1.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.transform_net.linear1.weight', tensor([[ 8.2508e-03, 6.7638e-03, 1.1646e-03, ..., -6.2727e-03, + 5.6403e-03, -1.1974e-04], + [ 7.3440e-03, 6.1518e-03, 7.6026e-04, ..., 1.4510e-02, + -1.6589e-03, 2.7994e-03], + [ 2.8477e-03, -4.6668e-03, 3.8916e-04, ..., -9.1436e-03, + 7.5263e-03, -1.8536e-03], + ..., + [-1.0596e-03, 6.1859e-03, 6.4966e-04, ..., 8.5257e-04, + 2.3146e-04, -9.4841e-04], + [-6.5033e-03, 3.3371e-03, -8.4808e-04, ..., -3.0498e-03, + 2.2534e-03, 3.5551e-05], + [-5.3057e-03, 3.8027e-04, -3.3572e-04, ..., -3.1654e-03, + 9.4897e-04, -6.7308e-04]], device='cuda:0')), ('module.transform_net.bn4.weight', tensor([0.3589, 0.3166, 0.3121, 0.2892, 0.3853, 0.5026, 0.2317, 0.2887, 0.3867, + 0.3111, 0.2729, 0.2014, 0.2629, 0.2605, 0.2155, 0.3986, 0.4467, 0.3590, + 0.3739, 0.2689, 0.3286, 0.3180, 0.3197, 0.2856, 0.3698, 0.2558, 0.3141, + 0.2943, 0.3554, 0.2563, 0.3489, 0.3047, 0.3419, 0.2298, 0.2690, 0.2756, + 0.2562, 0.3344, 0.3491, 0.2798, 0.2423, 0.3388, 0.3007, 
0.3043, 0.3067, + 0.2444, 0.4915, 0.2610, 0.3243, 0.2370, 0.2071, 0.3845, 0.4118, 0.2317, + 0.3169, 0.2285, 0.2103, 0.3644, 0.3548, 0.3727, 0.3436, 0.2503, 0.4535, + 0.4497, 0.5653, 0.3291, 0.3168, 0.3158, 0.4182, 0.2830, 0.2970, 0.2721, + 0.4228, 0.2754, 0.4108, 0.2578, 0.2776, 0.3949, 0.2047, 0.3591, 0.2689, + 0.2527, 0.4245, 0.2756, 0.3295, 0.3784, 0.4584, 0.2842, 0.3053, 0.2677, + 0.2959, 0.3347, 0.2693, 0.2083, 0.3531, 0.4570, 0.2690, 0.3185, 0.2358, + 0.2325, 0.1892, 0.2798, 0.3056, 0.2340, 0.2153, 0.1904, 0.1972, 0.5692, + 0.4571, 0.3003, 0.4308, 0.2346, 0.3238, 0.4188, 0.3521, 0.2991, 0.2391, + 0.3579, 0.2525, 0.3311, 0.5671, 0.3110, 0.3523, 0.3366, 0.3808, 0.2996, + 0.2713, 0.3635, 0.2946, 0.3183, 0.3438, 0.2744, 0.3052, 0.4232, 0.2928, + 0.2456, 0.4419, 0.2435, 0.3527, 0.3138, 0.2562, 0.2685, 0.3686, 0.4888, + 0.4855, 0.3166, 0.3025, 0.5221, 0.2042, 0.4126, 0.2606, 0.3205, 0.3426, + 0.1839, 0.2777, 0.3321, 0.4848, 0.2594, 0.5290, 0.3027, 0.3566, 0.3320, + 0.4632, 0.3855, 0.3267, 0.4716, 0.2470, 0.3182, 0.2156, 0.3769, 0.2742, + 0.4253, 0.2668, 0.3286, 0.3295, 0.2704, 0.3275, 0.3552, 0.2547, 0.2440, + 0.2914, 0.2792, 0.2356, 0.2707, 0.3096, 0.2199, 0.2882, 0.3458, 0.4177, + 0.4090, 0.4431, 0.4246, 0.5141, 0.4078, 0.2990, 0.2708, 0.2270, 0.2407, + 0.2801, 0.2917, 0.3115, 0.2700, 0.5743, 0.3987, 0.5450, 0.4017, 0.4969, + 0.2756, 0.3716, 0.3913, 0.3105, 0.2977, 0.2774, 0.3058, 0.4398, 0.3956, + 0.2331, 0.2505, 0.4858, 0.4705, 0.4528, 0.3898, 0.2972, 0.2462, 0.2593, + 0.3850, 0.2862, 0.2645, 0.5237, 0.2302, 0.2980, 0.3955, 0.3233, 0.4325, + 0.3468, 0.2754, 0.4533, 0.3648, 0.2878, 0.3239, 0.3439, 0.3510, 0.2883, + 0.5317, 0.3240, 0.2481, 0.2285, 0.3703, 0.2123, 0.2589, 0.3019, 0.2065, + 0.3293, 0.3702, 0.3029, 0.4097, 0.3102, 0.3291, 0.3635, 0.4642, 0.3903, + 0.5907, 0.3291, 0.2606, 0.3595, 0.2131, 0.2780, 0.3803, 0.2829, 0.2527, + 0.3853, 0.2344, 0.3337, 0.2513, 0.3290, 0.2696, 0.2926, 0.4167, 0.3216, + 0.5328, 0.4358, 0.2788, 0.2190, 0.2682, 0.5014, 0.2090, 
0.1927, 0.2560, + 0.3519, 0.2471, 0.2073, 0.2036, 0.2415, 0.2288, 0.2876, 0.3319, 0.3679, + 0.2994, 0.3547, 0.2788, 0.3225, 0.5020, 0.2427, 0.3746, 0.2920, 0.2416, + 0.3297, 0.2375, 0.3106, 0.3859, 0.3765, 0.4114, 0.2299, 0.3002, 0.3060, + 0.2944, 0.3167, 0.3459, 0.1833, 0.2519, 0.3879, 0.2904, 0.4537, 0.3835, + 0.3657, 0.3187, 0.2509, 0.1997, 0.3515, 0.4920, 0.2814, 0.4620, 0.3064, + 0.2318, 0.3600, 0.3076, 0.3422, 0.2756, 0.3049, 0.2736, 0.3937, 0.2723, + 0.2851, 0.3289, 0.2153, 0.3028, 0.2727, 0.2289, 0.3073, 0.3014, 0.3208, + 0.2983, 0.2895, 0.3169, 0.2919, 0.3042, 0.2717, 0.2297, 0.2403, 0.4338, + 0.3871, 0.2106, 0.2709, 0.3231, 0.3261, 0.3104, 0.2634, 0.3121, 0.3739, + 0.2735, 0.3457, 0.3150, 0.3387, 0.4678, 0.2590, 0.2213, 0.2990, 0.3970, + 0.2515, 0.4288, 0.3470, 0.2418, 0.3874, 0.4790, 0.3720, 0.2628, 0.3706, + 0.2945, 0.2121, 0.5900, 0.2819, 0.3283, 0.4410, 0.2566, 0.3389, 0.4032, + 0.2335, 0.2813, 0.3748, 0.2790, 0.2192, 0.2079, 0.4386, 0.5279, 0.2973, + 0.3046, 0.3217, 0.3123, 0.5242, 0.3303, 0.3518, 0.2245, 0.3842, 0.4572, + 0.3523, 0.2481, 0.4604, 0.4709, 0.3433, 0.3519, 0.5282, 0.4656, 0.4301, + 0.3707, 0.3428, 0.2393, 0.1973, 0.3313, 0.3335, 0.4090, 0.3307, 0.3148, + 0.2960, 0.3126, 0.2698, 0.2810, 0.2993, 0.3308, 0.2808, 0.3212, 0.2213, + 0.2764, 0.2492, 0.2819, 0.2913, 0.3591, 0.2289, 0.2390, 0.3574, 0.2839, + 0.4425, 0.2531, 0.2163, 0.2616, 0.2278, 0.2944, 0.4696, 0.4636, 0.1863, + 0.3454, 0.3998, 0.4897, 0.3370, 0.2498, 0.3641, 0.2455, 0.2456, 0.4150, + 0.2649, 0.2051, 0.3509, 0.3237, 0.2681, 0.4491, 0.2084, 0.3768, 0.2188, + 0.2522, 0.2437, 0.3386, 0.2905, 0.2149, 0.4311, 0.2246, 0.4307, 0.2494, + 0.3088, 0.2770, 0.2618, 0.2463, 0.2551, 0.3779, 0.3107, 0.3891, 0.4263, + 0.4773, 0.2589, 0.2806, 0.3502, 0.3564, 0.3604, 0.2772, 0.3158, 0.3129, + 0.3384, 0.3756, 0.4840, 0.2549, 0.4074, 0.3513, 0.3730, 0.2760], + device='cuda:0')), ('module.transform_net.bn4.bias', tensor([ 3.2475e-03, 9.9143e-03, -1.5392e-02, -7.4935e-04, -1.3307e-03, + 4.9069e-03, 
-3.7546e-04, 1.5528e-03, 8.6651e-04, -6.4351e-03, + -5.6989e-03, -1.7046e-03, -1.0218e-03, 1.4081e-02, 8.8798e-04, + -4.6545e-03, 2.1636e-03, -2.6478e-03, -5.6440e-04, -3.3742e-03, + 5.5607e-03, 1.4885e-03, -2.0730e-02, -1.4841e-02, -5.2077e-03, + -1.1326e-02, 1.8363e-03, -4.2151e-03, -1.5966e-02, -8.0523e-03, + -2.9187e-03, -6.1439e-03, -9.1927e-03, -4.0607e-04, -5.7285e-03, + -5.1110e-03, -2.0293e-03, -3.2010e-03, -5.9656e-04, -4.7794e-03, + -8.2080e-03, -9.6856e-03, -7.7747e-04, -2.5114e-03, -1.2455e-03, + 1.2634e-02, -9.7413e-03, -8.3607e-03, -9.5499e-03, -6.6070e-03, + 4.1642e-03, -2.1290e-03, -6.1058e-03, -7.9193e-03, 7.5988e-03, + 2.0326e-03, -1.4238e-02, 7.1253e-03, -6.0822e-03, 3.3777e-02, + -7.3459e-04, -1.2487e-02, -1.2276e-02, -6.4427e-03, 4.8623e-03, + -1.0354e-02, 2.7360e-03, 6.9307e-03, -1.3451e-02, -6.4363e-03, + -4.4975e-03, -1.2730e-02, -1.0151e-02, -9.9805e-03, 3.4681e-03, + -8.8072e-03, -4.0502e-04, 9.6062e-03, -7.7270e-03, -1.4338e-02, + 1.2258e-02, -5.9086e-03, 1.0852e-03, -3.9337e-03, 9.7679e-03, + -8.6859e-04, -5.6076e-04, -1.1028e-02, -8.9646e-03, -1.0402e-02, + -1.0337e-02, -7.9413e-03, -1.0365e-02, -1.5055e-05, -9.8729e-03, + -9.4663e-03, -1.3635e-02, -6.5993e-03, 3.0468e-03, -1.9372e-03, + -2.6739e-03, -3.3132e-03, -3.8442e-03, -7.4887e-03, 1.8204e-03, + -1.0634e-02, -6.9051e-03, -1.2095e-02, 5.0214e-04, 1.8723e-03, + -1.2583e-02, -1.0630e-03, -4.2450e-03, 6.0137e-04, 5.6658e-04, + 7.3546e-03, 3.2035e-03, -1.1935e-02, 3.3309e-03, -1.6602e-02, + 1.8271e-03, -9.4647e-03, -1.6377e-02, 3.8380e-05, -4.0387e-03, + -7.7998e-03, -7.4471e-03, -8.2813e-03, 5.8888e-03, 2.5439e-03, + -5.8275e-03, -1.3833e-02, 1.7901e-02, -9.1365e-03, -1.0022e-02, + -6.0129e-03, 3.9476e-04, -1.0206e-02, -1.0560e-02, -4.6585e-03, + -8.6432e-03, -7.6046e-03, 1.0295e-03, -7.0343e-03, -6.9092e-03, + -6.9680e-03, -5.4200e-03, -5.5302e-03, -7.0830e-03, -2.8188e-03, + -1.9800e-03, -7.6823e-03, 1.4903e-03, -1.3439e-02, -1.0057e-02, + 9.3911e-04, -5.0956e-03, 9.3581e-03, 
-5.3215e-03, -3.5423e-03, + -8.1380e-03, -4.8479e-03, 5.6451e-03, -9.0839e-03, -2.5518e-03, + -1.1700e-03, -1.9358e-03, 5.7445e-03, 1.3306e-03, -7.5902e-03, + -1.2429e-02, 1.0327e-03, 8.3028e-03, 1.4981e-03, -6.1410e-03, + -5.7225e-03, -8.5679e-03, 2.8240e-03, -9.6868e-03, 8.5077e-04, + -1.4061e-02, 9.8270e-04, -9.7071e-03, -1.8238e-03, 1.0506e-03, + -9.1019e-03, 5.2421e-03, 8.3154e-04, -2.9203e-03, -5.7906e-04, + -8.7733e-03, -1.4310e-02, -5.8176e-03, 1.1968e-03, 3.0459e-03, + -9.7377e-03, -2.0470e-03, 2.1481e-03, 3.8754e-03, 6.5436e-03, + -5.4049e-03, 3.1944e-03, -2.6877e-03, 1.3384e-04, -3.4191e-03, + -1.3515e-02, -1.2203e-02, 1.8562e-03, -7.8503e-03, -1.1309e-02, + -5.5845e-03, 5.2509e-04, -8.2955e-03, -2.6210e-03, -3.8167e-03, + -9.1443e-03, -1.1614e-03, -5.9991e-03, -1.2616e-02, 1.3659e-03, + -7.6815e-03, -7.8209e-03, -2.0740e-02, -7.8025e-03, -1.2535e-02, + -4.0266e-03, 3.0922e-04, 9.9107e-04, -2.4401e-03, -8.8577e-03, + -1.0176e-02, -4.5819e-03, -5.7650e-03, -1.2163e-02, -2.3141e-03, + -3.9914e-03, -5.3619e-03, -3.3347e-03, -1.0703e-03, 4.9554e-03, + -1.8194e-03, -2.0280e-03, 4.7570e-03, 1.0936e-02, -5.5228e-03, + -4.4526e-03, -2.7842e-04, -1.0641e-02, -7.7850e-03, 7.2695e-03, + -7.6992e-03, -5.6409e-03, -9.1701e-04, 1.4714e-02, -4.9793e-03, + -3.1810e-04, -4.1559e-03, 1.2637e-02, -1.6521e-02, 9.2829e-04, + -1.6086e-02, -1.9304e-03, 1.9633e-03, -6.4347e-03, -9.2761e-03, + -5.1301e-04, -1.0652e-02, 1.3994e-03, -1.0595e-02, -1.1092e-02, + -1.6379e-02, -1.9215e-03, 4.9621e-03, -2.3614e-03, -3.9491e-03, + -7.3259e-03, 3.8535e-04, -1.8769e-02, -1.9692e-03, 4.8160e-03, + -6.5866e-03, -6.2489e-03, -2.7347e-03, -1.3649e-02, -1.2421e-03, + -7.4332e-03, 7.5233e-03, -5.4337e-03, -6.6287e-03, -5.2822e-03, + 4.7262e-04, -1.0907e-02, -3.7601e-03, -6.0133e-03, 1.4157e-02, + -1.3771e-02, -7.4764e-03, -8.6518e-03, -6.5909e-03, -4.4057e-03, + -6.8029e-03, 4.1216e-04, -1.2369e-02, -8.2031e-03, -9.7190e-03, + -1.9496e-03, -8.9473e-03, -2.3711e-03, -4.9870e-03, -9.0504e-03, + 
-8.5077e-03, -5.7895e-04, -1.4441e-02, 5.8253e-03, 3.5923e-03, + -3.3265e-03, -8.4265e-03, -4.6703e-03, -1.2857e-02, -2.2671e-04, + -3.7786e-03, -8.1355e-03, -1.6899e-02, -9.8595e-03, 2.8263e-03, + -1.4457e-03, 1.1552e-02, 1.7840e-04, 4.1601e-03, -1.0701e-02, + -5.6561e-03, -6.0396e-03, 2.9317e-03, 9.1888e-03, -1.7089e-03, + 9.0687e-03, -1.1256e-02, -4.0025e-03, -9.1470e-03, -9.9894e-03, + -1.1209e-02, -4.3834e-03, 7.8218e-03, 9.1716e-03, -6.1208e-03, + -7.4148e-05, 1.0085e-02, -5.5871e-04, -1.4948e-02, 8.2649e-03, + 7.7550e-03, 1.0893e-03, -5.0801e-03, -7.9351e-03, -8.4853e-03, + -7.6395e-03, -1.0572e-03, -2.3648e-03, 2.8200e-03, -4.4825e-03, + -8.4439e-03, 1.0884e-02, -4.9870e-03, -7.4437e-03, -2.1626e-03, + -2.9036e-03, -4.7397e-03, 8.6662e-03, -2.9736e-03, -5.6243e-03, + -1.1127e-02, 3.2809e-03, 4.2815e-03, 1.7159e-03, 4.4682e-04, + -1.4210e-03, -1.1546e-02, -2.6946e-03, -3.4833e-03, -4.8963e-03, + -7.1829e-03, -2.1372e-03, -1.6022e-02, -1.1578e-02, -8.0539e-03, + -4.1474e-03, -7.4204e-03, -6.6708e-03, -7.0149e-03, -5.7679e-03, + 6.9962e-03, 8.6974e-04, -4.0866e-03, -1.8922e-04, -8.5003e-03, + -6.7273e-03, 7.6297e-03, -5.4730e-03, -1.0522e-02, 1.2244e-02, + 3.1153e-03, -6.4161e-03, -7.3981e-03, -3.1739e-03, -3.2289e-03, + 1.7576e-02, 2.9238e-03, -4.2306e-03, -1.0550e-02, -1.4051e-02, + -3.8977e-03, -8.1430e-04, -1.2650e-02, 1.3809e-02, -8.0820e-04, + -4.5854e-03, -6.3907e-04, -8.4673e-03, -2.4680e-03, -1.2708e-02, + -2.0983e-03, -7.4221e-03, 8.2922e-03, -4.9823e-03, -7.6556e-03, + -5.0364e-03, 4.0367e-03, -9.1901e-05, -9.7083e-03, 1.4384e-02, + 1.0760e-02, 7.1655e-03, -4.6748e-03, -9.7282e-03, 1.4670e-04, + -1.4722e-02, 4.6902e-03, -3.5710e-03, 3.8599e-03, -3.1566e-03, + 3.2621e-03, -3.6439e-03, 9.1998e-03, -7.2703e-03, -6.5928e-04, + -4.5116e-03, -3.4974e-03, -1.0974e-02, -5.4826e-03, -1.9056e-02, + -1.0378e-03, 8.7362e-03, -5.1764e-03, 4.9589e-03, -1.1505e-02, + 1.7874e-03, -3.8352e-03, -5.9313e-03, -6.6084e-03, 5.5102e-06, + 1.4975e-03, 8.4882e-03, 
-9.1361e-03, -5.2143e-03, -1.4060e-02, + 4.9339e-03, 3.2891e-03, -3.3607e-03, 3.2524e-03, -6.3174e-03, + 2.5791e-03, -5.6568e-03, 4.8107e-03, -6.0872e-04, -1.1786e-03, + -8.0384e-03, 2.1064e-04, -1.0022e-02, 8.1678e-03, -9.4084e-03, + -4.9153e-04, 1.0709e-03, -1.2021e-03, -3.1174e-03, -2.5109e-03, + -8.5716e-03, -2.7402e-04, -1.0170e-02, 2.8354e-03, -8.6437e-04, + -5.7341e-03, -8.2829e-03, -4.0680e-04, -1.2334e-03, -1.3218e-03, + -4.4228e-03, 1.2199e-02, -2.9424e-03, 1.3184e-03, -1.7104e-04, + -7.2554e-04, -1.5282e-03, -6.1526e-03, -1.0304e-02, 1.3909e-03, + -1.2050e-03, -7.0577e-03, -1.0187e-02, -8.5626e-03, -8.0855e-03, + -9.8033e-03, -1.3769e-02], device='cuda:0')), ('module.transform_net.bn4.running_mean', tensor([ 1.3846, 0.3791, -1.2579, 0.5797, 0.5379, 0.5862, -0.6994, -0.1435, + 1.9386, -0.8666, -0.9643, -0.4262, 1.0248, 0.0383, 0.1987, -1.1767, + 0.2006, -1.0006, 0.4396, 0.6339, -0.3293, -0.0103, -1.0070, -1.0138, + 0.2848, 1.0191, -0.6309, 0.6005, -0.8816, -0.7623, -0.9148, -1.5641, + -0.1574, -0.1239, 0.3986, -0.8501, -0.5580, 0.3943, -1.3399, -0.1028, + -0.9182, 0.7140, 0.3032, 0.6446, 0.8573, 0.4363, 0.6164, -0.8817, + 0.2651, -0.6657, 1.1605, -0.0214, -1.0233, -0.8709, -0.5907, -0.0179, + -0.5879, 1.0229, -0.1029, 0.1420, -0.0870, -0.4103, -1.1147, -1.5024, + 0.6980, -0.7380, 0.3307, 0.3414, -0.8002, 0.4672, -1.0711, -0.2277, + -0.8896, -1.7636, 0.6828, -1.2372, 0.1653, -0.5758, -1.5190, -0.0973, + 0.5021, -0.2589, -0.2144, 0.5816, -0.0879, 1.5125, -0.9006, -1.1552, + 0.0476, -0.7799, -0.4448, -0.8629, -1.0244, 0.0465, -0.0535, -1.2342, + -0.7936, -0.9909, -1.5834, 0.2867, -1.6324, 0.2118, -0.8312, 0.8598, + -1.1061, -0.6782, -0.9818, -1.1597, 0.8100, -0.3156, 0.7429, 0.2352, + -0.0118, -0.5629, -0.1973, -0.0712, -0.1455, 0.5346, -0.1444, -1.7542, + -0.4581, -0.7950, -1.0640, -0.9607, 0.3857, -0.4736, -0.5261, -0.0599, + 0.4659, 0.8032, 0.6476, 1.0445, 0.0727, -0.9337, -0.2910, 0.6287, + 1.3183, -0.1227, -0.5724, -0.8959, 0.9791, 0.5578, -0.4495, 
-1.0186, + 0.2568, 0.6571, -0.1197, -0.2360, 1.1357, -0.6626, -0.7328, -0.6374, + -0.6378, -0.9770, 0.7805, -0.5452, 1.0972, 0.7038, -0.0719, -0.5606, + -0.5990, -0.2898, -0.1745, -0.9906, -0.0532, -0.4820, -1.3240, -0.6485, + 0.3247, -0.3379, -1.5901, 0.3512, -0.2575, 0.4038, -0.1406, -0.9939, + -0.8264, 0.0411, -1.5807, -0.3760, -0.8143, 0.2323, 0.8040, -0.2499, + 0.1145, -0.4502, -0.3047, -0.8964, 0.1543, 1.0370, -0.7117, -1.6888, + -0.0654, -1.3497, 0.0559, -0.6492, -0.0149, 1.4589, 1.0511, 0.3552, + -0.4917, 0.2860, -1.3581, 1.4052, -1.1612, -0.8957, -0.4510, 0.1733, + -0.9503, 0.9017, 0.2097, 2.1777, -0.6209, 0.1728, 1.0119, -0.0727, + -1.0104, -1.1150, -0.6559, 0.3458, -0.8603, 0.4438, 0.5709, -0.6500, + 0.7006, -0.2563, -0.1432, -0.9258, 1.1661, 0.7150, -1.9739, 0.1315, + 0.9076, 0.1128, -0.1492, 0.1047, -0.8765, 0.1953, 0.1666, -0.7464, + 0.5562, 0.2854, -1.1540, 0.3121, -2.3180, -0.7382, -0.3625, 0.5054, + -0.7407, 0.3191, 0.1033, -0.8726, 0.3143, 0.1319, -0.1037, 0.8283, + -0.3239, -0.0290, -1.0652, -1.0317, -1.1461, -0.5652, 0.2639, 0.5076, + -0.6987, 0.2498, -0.9470, -0.8741, -0.2377, 0.0509, 0.3934, 0.1747, + -1.0209, 0.4240, -0.0173, -0.6537, 0.6315, 0.4363, -1.6090, 0.6708, + -1.1385, 0.8353, -1.3124, -0.6993, -1.0359, 0.5033, -0.4576, -0.7395, + -1.4581, -0.4242, -0.5125, -0.9128, 0.5844, 0.1183, -0.2559, -1.0115, + -0.8819, -0.8674, 0.5301, -0.0873, -0.7631, 0.8573, 0.8579, -0.1081, + -0.7786, -0.2599, 1.3330, 0.4517, -0.8188, -1.0404, -0.2062, -0.7967, + -0.9809, -0.2822, 0.7821, 0.0101, 0.1432, -0.0458, -0.5833, 0.1898, + -0.9757, -0.5176, -0.9574, 1.0107, 0.3388, 0.1133, 0.7074, -0.4062, + 0.1487, -0.6999, -1.5619, 0.8091, 0.5814, 1.2234, 0.2701, 0.4857, + -0.1190, -1.2528, -0.8753, 0.0242, -0.7341, -0.0230, -0.3448, -0.9010, + 0.4679, -0.5459, 0.2506, -0.1416, -0.3732, 0.5960, 0.9937, 0.5126, + 0.7531, -1.1408, -1.6077, -1.4082, 0.6318, -0.7399, 0.2618, 0.3879, + 0.0907, 0.8005, -1.0563, -0.8611, -0.0247, 1.3604, 0.0717, -0.5363, + 0.6501, 
0.7204, -1.4536, 1.7639, 0.5723, 0.5528, 0.0298, 0.0440, + -0.9526, 0.6977, -0.8631, 1.1701, 0.0582, 0.5315, -0.9016, 0.6517, + 0.1819, 0.2466, -1.0774, 0.5293, 0.1675, 1.1969, -0.3298, 0.2142, + 0.6571, -0.5429, -0.4180, 0.2063, 0.6750, 0.5993, -1.7575, -0.3268, + 0.4420, -0.9859, -0.4034, 0.7467, 0.7946, -0.7469, -0.1429, -0.0684, + -0.9978, 0.0473, 0.2083, 0.4770, -1.9778, 0.3204, 0.4311, 0.5129, + 0.9304, 0.5808, 0.5324, -0.9754, -1.6062, -0.6528, 0.1965, 1.0209, + -0.0055, -0.5902, -0.7344, 0.2850, -0.7491, -0.1880, 0.4241, 0.8423, + -0.4947, -0.6353, -0.7455, -1.0138, 0.2851, 0.4234, 0.5929, -0.1053, + 0.1159, -1.0744, -0.2650, -1.9706, -0.2254, 0.1585, 0.7394, -0.1163, + -0.9731, -0.5936, -0.3328, -0.3136, 0.1953, 0.1898, -0.7502, -0.0381, + 1.0446, 1.6166, 0.1729, 0.7244, -0.0938, -0.5298, 2.0459, -0.5574, + -1.1794, 1.5907, 0.2143, 0.3390, 0.4435, 0.3542, 0.9506, -0.7099, + -0.2261, 0.2658, -1.3909, 0.5360, 0.7028, -1.6657, 0.8541, -1.1339, + 1.3057, 0.5707, 0.8159, -0.0698, -0.6880, 0.0230, 0.9106, -0.3132, + 0.7453, 0.8598, -0.7935, -0.5592, -1.1228, 0.7572, 0.6956, -1.0777, + 0.5146, -1.7566, -0.9408, -0.1280, -0.5042, -0.9676, -0.3953, -0.3619, + 0.5049, -0.2248, -1.2081, 0.2702, -1.0590, -0.7350, 0.3899, -0.9716], + device='cuda:0')), ('module.transform_net.bn4.running_var', tensor([1.5695e-03, 3.9582e-04, 1.3202e-03, 5.5844e-04, 7.4619e-04, 1.3115e-03, + 4.3950e-04, 1.9472e-04, 2.8156e-03, 6.2792e-04, 9.4869e-04, 2.0219e-04, + 1.1018e-03, 1.0358e-04, 2.8407e-04, 1.0641e-03, 8.4092e-05, 7.6733e-04, + 3.0113e-04, 4.7541e-04, 4.9637e-04, 8.3330e-05, 1.3026e-03, 8.0267e-04, + 5.6551e-04, 1.0456e-03, 4.0252e-04, 4.7551e-04, 6.8894e-04, 4.7212e-04, + 5.7146e-04, 2.1128e-03, 2.6991e-04, 2.4488e-04, 2.8189e-04, 6.7160e-04, + 2.9400e-04, 3.3165e-04, 1.2713e-03, 1.3045e-04, 7.2616e-04, 6.6785e-04, + 4.6842e-04, 6.8373e-04, 8.7422e-04, 2.7112e-04, 4.4672e-04, 7.3257e-04, + 2.3844e-04, 3.3542e-04, 9.5477e-04, 3.6830e-04, 7.9540e-04, 7.5180e-04, + 4.3165e-04, 
1.3818e-04, 6.6544e-04, 1.0070e-03, 2.4134e-04, 2.5148e-04, + 1.5391e-04, 3.2198e-04, 1.0894e-03, 2.5728e-03, 1.0050e-03, 6.7884e-04, + 6.1586e-04, 4.8614e-04, 7.9136e-04, 3.6648e-04, 9.3733e-04, 3.4599e-04, + 7.7051e-04, 2.1176e-03, 6.9718e-04, 1.4744e-03, 2.6233e-04, 8.9843e-04, + 1.8264e-03, 4.2626e-04, 5.9542e-04, 1.5070e-04, 5.0984e-04, 3.1926e-04, + 2.8556e-04, 2.1909e-03, 1.2449e-03, 1.3808e-03, 3.8587e-04, 5.1673e-04, + 5.5066e-04, 7.3283e-04, 8.1882e-04, 1.4139e-04, 2.1772e-04, 1.4376e-03, + 5.5495e-04, 1.0513e-03, 2.0457e-03, 1.2106e-04, 2.3735e-03, 3.4427e-04, + 7.6259e-04, 9.7756e-04, 1.4086e-03, 4.0121e-04, 9.1948e-04, 1.2265e-03, + 1.1327e-03, 2.3699e-04, 9.3326e-04, 3.0925e-04, 3.5736e-04, 6.8804e-04, + 1.1416e-04, 1.7721e-04, 4.6956e-04, 4.1914e-04, 1.9947e-04, 2.3200e-03, + 8.4228e-04, 4.7694e-04, 8.5183e-04, 7.1368e-04, 7.0851e-04, 2.9740e-04, + 4.3624e-04, 6.7087e-04, 3.6727e-04, 4.9172e-04, 5.5549e-04, 1.1021e-03, + 1.7211e-04, 7.6491e-04, 7.7500e-05, 4.0135e-04, 1.2244e-03, 8.2942e-05, + 4.3884e-04, 7.7331e-04, 8.7427e-04, 3.5006e-04, 5.0253e-04, 9.7950e-04, + 3.4737e-04, 4.2502e-04, 2.2303e-04, 4.3712e-04, 8.4925e-04, 5.9945e-04, + 1.8884e-04, 4.2125e-04, 4.6807e-04, 9.7258e-04, 7.8761e-04, 6.0511e-04, + 1.0118e-03, 7.2619e-04, 1.8846e-04, 3.6605e-04, 2.3817e-04, 4.6912e-04, + 4.8029e-04, 8.2306e-04, 8.0524e-05, 6.0155e-04, 1.0713e-03, 4.7962e-04, + 2.5857e-04, 3.0878e-04, 1.7828e-03, 9.7035e-04, 4.9430e-05, 5.7749e-04, + 3.0802e-04, 8.4100e-04, 1.0379e-03, 2.8095e-04, 2.1924e-03, 3.6083e-04, + 6.2434e-04, 3.3923e-04, 4.4899e-04, 3.2535e-04, 4.1618e-04, 2.4980e-04, + 4.2705e-04, 8.1411e-04, 2.1477e-04, 6.4430e-04, 7.0239e-04, 2.3452e-03, + 2.4475e-04, 1.8268e-03, 6.7771e-05, 6.4119e-04, 1.0534e-04, 1.4743e-03, + 6.4047e-04, 5.5106e-04, 8.2947e-04, 4.4284e-04, 1.3097e-03, 1.3599e-03, + 9.4519e-04, 8.8501e-04, 2.2776e-04, 2.5571e-04, 1.0127e-03, 8.6240e-04, + 1.4298e-04, 4.7103e-03, 3.1682e-04, 1.4626e-04, 9.6519e-04, 3.3686e-04, + 7.4843e-04, 
8.3557e-04, 7.8022e-04, 1.1750e-04, 8.7763e-04, 2.2908e-04, + 3.2646e-04, 4.6898e-04, 7.3524e-04, 3.9812e-04, 1.3695e-04, 9.9409e-04, + 1.0391e-03, 4.3078e-04, 2.9873e-03, 3.7010e-04, 8.8658e-04, 5.3056e-04, + 2.4986e-04, 5.1221e-04, 5.5012e-04, 3.1570e-04, 1.5102e-04, 4.9216e-04, + 6.2422e-04, 6.7346e-04, 1.0816e-03, 2.1787e-04, 6.4167e-03, 5.0129e-04, + 1.8595e-04, 7.4548e-04, 5.7802e-04, 3.7884e-04, 3.9926e-04, 5.8354e-04, + 9.9225e-05, 6.9021e-04, 2.7229e-04, 1.0926e-03, 1.7719e-04, 3.1635e-04, + 1.0942e-03, 1.2968e-03, 1.3673e-03, 5.6549e-04, 2.4667e-04, 5.6172e-04, + 4.8518e-04, 2.4213e-04, 5.7495e-04, 5.5294e-04, 3.7417e-04, 1.7898e-04, + 2.8995e-04, 1.8460e-04, 1.0992e-03, 4.5336e-04, 3.2332e-04, 4.0504e-04, + 2.7404e-04, 5.3012e-04, 1.9549e-03, 3.4135e-04, 1.0606e-03, 7.1962e-04, + 1.3558e-03, 7.4288e-04, 1.1028e-03, 1.9071e-04, 1.4956e-04, 4.4969e-04, + 1.4816e-03, 1.8448e-04, 3.3141e-04, 7.9356e-04, 4.3711e-04, 1.6892e-04, + 6.1325e-05, 8.9912e-04, 7.6596e-04, 6.3806e-04, 2.3485e-04, 2.2833e-04, + 5.1537e-04, 9.9792e-04, 7.1984e-04, 3.2187e-04, 7.1996e-04, 2.9822e-04, + 1.5302e-03, 2.7023e-04, 7.7022e-04, 1.1287e-03, 1.8956e-04, 6.8303e-04, + 6.8289e-04, 2.9356e-04, 6.1974e-04, 2.5039e-04, 9.3713e-05, 4.5460e-04, + 3.3044e-04, 2.7294e-04, 8.0571e-04, 3.5275e-04, 9.0541e-04, 1.1900e-03, + 5.4995e-04, 3.4445e-04, 7.6419e-04, 2.3613e-04, 6.3344e-04, 7.4089e-04, + 1.6673e-03, 8.4425e-04, 3.4938e-04, 9.2536e-04, 2.3889e-04, 3.4339e-04, + 1.6866e-04, 1.2127e-03, 6.6321e-04, 2.1923e-04, 8.8280e-04, 2.7778e-04, + 6.7218e-05, 8.2815e-04, 3.2900e-04, 8.7835e-04, 3.4425e-04, 1.3813e-04, + 2.7819e-04, 4.3603e-04, 9.4766e-04, 6.1539e-04, 5.4065e-04, 1.0044e-03, + 1.7724e-03, 1.7025e-03, 5.9627e-04, 6.3647e-04, 8.2093e-05, 3.7259e-04, + 2.2552e-04, 7.8894e-04, 1.0618e-03, 6.1167e-04, 4.4699e-04, 9.0417e-04, + 7.0063e-05, 4.9443e-04, 5.5567e-04, 6.7120e-04, 1.8991e-03, 1.8430e-03, + 5.7073e-04, 5.2635e-04, 1.1052e-04, 6.3615e-05, 8.9393e-04, 2.5887e-04, + 5.4603e-04, 
9.0823e-04, 3.4128e-04, 2.2959e-04, 6.7301e-04, 9.5764e-04, + 2.6692e-04, 4.4463e-04, 9.7086e-04, 2.6991e-04, 8.4940e-05, 1.3235e-03, + 3.6432e-04, 5.0436e-04, 4.9330e-04, 4.6661e-04, 5.3467e-04, 4.2740e-04, + 3.5796e-04, 5.1457e-04, 2.7996e-03, 4.1126e-04, 2.2321e-04, 6.1177e-04, + 3.0829e-04, 5.6858e-04, 6.1285e-04, 4.1788e-04, 2.9626e-04, 3.2360e-04, + 1.0253e-03, 2.8534e-04, 3.8217e-04, 3.0808e-04, 2.3364e-03, 5.7635e-04, + 3.8526e-04, 4.5106e-04, 9.2826e-04, 8.6810e-04, 5.6598e-04, 8.9093e-04, + 2.3081e-03, 8.0347e-04, 1.2860e-04, 1.4341e-03, 4.1078e-04, 4.2050e-04, + 3.4355e-04, 2.1509e-04, 7.8061e-04, 1.8604e-04, 2.2004e-04, 7.6658e-04, + 3.9806e-04, 6.0532e-04, 6.9846e-04, 1.0265e-03, 3.2794e-04, 5.4563e-04, + 4.1312e-04, 8.7194e-05, 1.6393e-04, 8.0111e-04, 5.8264e-04, 3.5698e-03, + 1.6479e-04, 4.4325e-04, 4.7795e-04, 2.3539e-04, 7.9393e-04, 3.5731e-04, + 6.0475e-04, 5.2331e-04, 2.3347e-04, 2.6124e-04, 5.4213e-04, 2.0881e-04, + 6.5966e-04, 2.1343e-03, 1.0078e-04, 4.8517e-04, 5.8185e-05, 7.8588e-04, + 3.8747e-03, 2.8185e-04, 1.2741e-03, 2.3792e-03, 2.3610e-04, 3.2002e-04, + 3.7612e-04, 4.1969e-04, 6.5111e-04, 1.1362e-03, 1.1511e-04, 4.7587e-04, + 1.6489e-03, 4.1450e-04, 4.9659e-04, 2.0087e-03, 8.9178e-04, 1.0742e-03, + 1.4866e-03, 5.0323e-04, 7.1145e-04, 1.0990e-04, 4.5288e-04, 2.8278e-04, + 7.9851e-04, 3.4776e-04, 7.0937e-04, 7.5080e-04, 6.9368e-04, 4.6551e-04, + 1.0822e-03, 4.5567e-04, 3.6313e-04, 1.0714e-03, 6.9251e-04, 2.0749e-03, + 6.3040e-04, 5.2935e-04, 4.4345e-04, 7.1831e-04, 4.0170e-04, 3.2502e-04, + 3.6725e-04, 1.7538e-04, 1.3996e-03, 2.7395e-04, 1.1680e-03, 4.3449e-04, + 5.2162e-04, 7.2307e-04], device='cuda:0')), ('module.transform_net.bn4.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.transform_net.linear2.weight', tensor([[-0.0017, -0.0023, -0.0198, ..., -0.0241, 0.0127, 0.0156], + [ 0.0027, 0.0015, -0.0070, ..., -0.0032, -0.0131, 0.0102], + [-0.0072, -0.0093, 0.0013, ..., 0.0059, -0.0093, -0.0042], + ..., + [-0.0059, -0.0067, 
-0.0062, ..., 0.0081, 0.0097, -0.0044], + [-0.0126, -0.0018, -0.0063, ..., -0.0111, -0.0105, -0.0159], + [-0.0341, -0.0405, 0.0246, ..., -0.0196, 0.0140, -0.0008]], + device='cuda:0')), ('module.transform_net.bn5.weight', tensor([0.5649, 0.4819, 0.4244, 0.5036, 0.6632, 0.4734, 0.7601, 0.6363, 0.6647, + 0.5677, 0.4276, 0.6597, 0.6278, 0.5423, 0.4997, 0.3692, 0.4918, 0.4679, + 0.4791, 0.4511, 0.5856, 0.4397, 0.4909, 0.6046, 0.4701, 0.5312, 0.5060, + 0.6203, 0.4142, 0.5347, 0.4423, 0.4182, 0.5468, 0.5187, 0.5616, 0.4037, + 0.4750, 0.6016, 0.5441, 0.5896, 0.5955, 0.4499, 0.4758, 0.4709, 0.5354, + 0.5858, 0.5739, 0.6551, 0.5809, 0.5338, 0.4564, 0.5809, 0.5540, 0.5508, + 0.5784, 0.5874, 0.5034, 0.5160, 0.5495, 0.5302, 0.4885, 0.5566, 0.7045, + 0.4139, 0.4704, 0.5311, 0.6431, 0.5489, 0.6417, 0.4164, 0.6889, 0.5252, + 0.4958, 0.5134, 0.6834, 0.3103, 0.6753, 0.5063, 0.6411, 0.5190, 0.5299, + 0.3645, 0.5846, 0.4782, 0.5979, 0.5437, 0.5916, 0.5866, 0.5979, 0.4889, + 0.5488, 0.5110, 0.4051, 0.3883, 0.5293, 0.6579, 0.7347, 0.3491, 0.5769, + 0.5488, 0.5406, 0.5578, 0.4282, 0.5449, 0.5027, 0.3858, 0.4046, 0.3793, + 0.4987, 0.5210, 0.5970, 0.5980, 0.6693, 0.5220, 0.5508, 0.5786, 0.5477, + 0.4464, 0.5350, 0.5423, 0.5155, 0.3971, 0.6049, 0.4263, 0.3758, 0.5688, + 0.4521, 0.5832, 0.6545, 0.6899, 0.4562, 0.5570, 0.5259, 0.5797, 0.5659, + 0.6002, 0.5365, 0.4209, 0.5584, 0.6853, 0.5495, 0.3895, 0.5617, 0.4890, + 0.5198, 0.5365, 0.6089, 0.5305, 0.4839, 0.6258, 0.4303, 0.4026, 0.6036, + 0.4461, 0.3865, 0.5465, 0.3958, 0.6547, 0.4943, 0.6441, 0.6173, 0.3932, + 0.4584, 0.6915, 0.6006, 0.5832, 0.5978, 0.4348, 0.3244, 0.4529, 0.5386, + 0.4461, 0.4736, 0.4689, 0.5392, 0.6149, 0.3500, 0.6060, 0.5943, 0.6492, + 0.4142, 0.6890, 0.5273, 0.7054, 0.4544, 0.4209, 0.5173, 0.5793, 0.6466, + 0.5700, 0.5392, 0.5420, 0.3339, 0.5194, 0.4851, 0.5076, 0.3752, 0.5739, + 0.5627, 0.4962, 0.6410, 0.5787, 0.5789, 0.5265, 0.6600, 0.7250, 0.6352, + 0.6118, 0.4412, 0.5827, 0.5883, 0.5853, 0.4827, 0.5342, 0.5355, 
0.4954, + 0.6764, 0.6474, 0.7026, 0.5247, 0.4721, 0.4691, 0.5453, 0.5355, 0.5937, + 0.4976, 0.3802, 0.5509, 0.6223, 0.6130, 0.6339, 0.7018, 0.4889, 0.6565, + 0.5836, 0.6621, 0.6758, 0.5180, 0.3873, 0.6006, 0.4444, 0.4681, 0.4751, + 0.5940, 0.5071, 0.5214, 0.5640, 0.5080, 0.4454, 0.5925, 0.5086, 0.5375, + 0.5849, 0.4762, 0.5213, 0.6339], device='cuda:0')), ('module.transform_net.bn5.bias', tensor([-0.0822, -0.0854, -0.1044, -0.0060, -0.0118, -0.0906, 0.0281, -0.0564, + -0.0540, -0.0617, -0.0861, -0.0283, -0.0169, -0.0414, -0.0659, -0.1270, + -0.0632, -0.0269, -0.1003, -0.0794, -0.0652, -0.0620, -0.0827, -0.0119, + -0.0573, -0.1570, -0.0813, 0.0356, -0.0813, -0.0742, -0.0779, -0.0509, + -0.0850, -0.0723, -0.0457, -0.0798, -0.0665, -0.1020, -0.0697, -0.0077, + -0.0189, -0.1365, -0.0407, -0.1061, -0.1199, 0.0031, -0.0665, -0.0812, + -0.0290, -0.0888, -0.0812, -0.1000, -0.0273, -0.0104, -0.0396, -0.0386, + -0.0967, 0.0169, -0.0049, -0.0911, -0.1103, -0.0467, 0.0496, -0.0995, + -0.0774, -0.1071, -0.0625, 0.0214, -0.1008, -0.0986, -0.0275, -0.1097, + -0.0227, -0.0605, -0.0889, -0.0925, -0.1070, -0.0663, -0.0410, -0.1003, + -0.0998, -0.0937, -0.0244, -0.1314, -0.0078, -0.0588, -0.0614, -0.1189, + -0.0358, -0.0982, -0.0908, -0.0403, -0.0997, -0.1195, -0.0653, -0.0861, + -0.0410, -0.0794, -0.0386, -0.0423, -0.1409, -0.0284, -0.0674, -0.0959, + -0.1132, -0.0690, -0.1199, -0.1059, -0.0132, -0.0997, -0.0363, -0.1257, + -0.0363, -0.1115, -0.0759, 0.0024, -0.0369, -0.0482, -0.1056, -0.0727, + -0.1065, -0.1242, -0.0975, -0.1110, -0.1086, -0.0352, -0.0552, -0.0744, + -0.0854, 0.0071, -0.0901, -0.0297, -0.0954, -0.1063, -0.0565, -0.0334, + -0.1075, -0.1135, -0.0601, -0.0216, 0.0081, -0.1152, -0.0498, -0.0316, + -0.1042, -0.1051, -0.0170, -0.0806, -0.0912, 0.0093, -0.1400, -0.1083, + -0.0644, -0.1170, -0.1121, -0.0248, -0.0916, -0.0777, -0.0961, -0.0354, + 0.0338, -0.0743, -0.0818, 0.0093, -0.0777, -0.0586, -0.0318, -0.0816, + -0.1139, -0.0659, -0.0553, -0.1050, -0.0431, -0.0669, 
-0.0715, -0.0008, + -0.0943, 0.0227, -0.0362, 0.0114, -0.1150, -0.0704, -0.0622, 0.0243, + -0.0790, -0.1034, -0.0994, -0.0871, 0.0077, 0.0073, -0.0839, -0.0937, + -0.1138, -0.0372, -0.0715, -0.0857, -0.0893, -0.0083, -0.1444, -0.0667, + -0.0875, -0.0107, -0.0306, -0.0965, -0.0125, -0.0080, -0.0326, -0.0067, + -0.1109, -0.1093, -0.0749, -0.0874, -0.0754, -0.0358, -0.0920, -0.0462, + 0.0132, -0.0263, -0.0267, -0.1067, -0.0910, -0.0031, -0.0843, -0.0739, + -0.0290, -0.0767, -0.0845, -0.0560, -0.0140, -0.0849, -0.1299, -0.0409, + -0.0884, 0.0294, -0.0802, -0.0981, 0.0029, -0.1137, -0.0889, -0.0027, + -0.0725, -0.1215, -0.0816, -0.0800, -0.0626, -0.0734, -0.0793, -0.0931, + -0.0973, 0.0199, -0.0051, -0.0849, -0.0208, -0.0735, -0.0257, 0.0202], + device='cuda:0')), ('module.transform_net.bn5.running_mean', tensor([-0.1907, -0.1665, -0.1274, -0.0445, -0.1298, -0.0233, 0.0471, -0.1583, + -0.2220, -0.0551, -0.1874, -0.0130, -0.0183, -0.1813, -0.2129, -0.1132, + -0.0352, -0.2791, -0.1537, -0.1147, -0.2039, -0.1906, -0.1411, -0.0158, + -0.0189, 0.5731, -0.0967, -0.2547, -0.0950, -0.1342, -0.1957, -0.0950, + -0.1377, -0.1522, -0.0456, -0.2101, -0.1931, -0.1934, -0.2359, -0.0511, + -0.0723, -0.1074, -0.2131, -0.1332, -0.1269, -0.0475, -0.1928, -0.1760, + -0.1010, -0.2610, -0.1284, -0.1688, -0.0697, -0.0702, 0.0354, -0.0825, + -0.1954, -0.5271, -0.4318, -0.1669, -0.1487, -0.2423, 0.0342, -0.1815, + -0.1539, -0.1629, -0.1193, -0.4060, -0.1709, -0.0984, -0.1054, -0.1635, + -0.3571, -0.0970, -0.1537, -0.0709, -0.2172, -0.0717, -0.1754, 0.1220, + -0.1354, -0.1824, -0.0772, -0.1799, -0.2067, -0.2536, -0.1387, -0.1374, + -0.3254, -0.1956, -0.1751, -0.2133, -0.1772, -0.1658, -0.2411, -0.1900, + 0.0017, -0.0891, -0.0481, -0.1481, -0.1586, -0.0376, -0.3243, -0.1401, + -0.1064, -0.2745, -0.1160, -0.1496, -0.3453, -0.1511, -0.1001, -0.1920, + -0.1215, -0.1724, -0.1219, -0.1978, 0.0192, -0.1442, -0.1316, -0.1678, + -0.1524, -0.1736, -0.1636, -0.1828, -0.2777, -0.0535, 0.0080, -0.1458, + 
-0.1821, 0.0025, -0.1499, 0.0170, -0.1536, -0.1355, -0.0078, -0.1642, + -0.1816, -0.1092, -0.1835, -0.1603, -0.1468, -0.1275, -0.1425, -0.2271, + -0.2046, -0.1411, 0.0347, -0.2097, -0.1524, 0.0463, -0.1424, -0.1622, + -0.1507, -0.1642, -0.1422, -0.0917, -0.1720, -0.0978, -0.1761, -0.2455, + -0.5199, -0.1427, -0.1479, -0.0413, -0.1662, -0.1385, -0.2218, -0.1655, + -0.1193, -0.1710, -0.0753, -0.1387, -0.3384, -0.1495, -0.2323, 0.0704, + -0.2205, -0.3348, -0.0960, -0.2696, -0.1414, -0.1722, -0.0871, 0.0122, + -0.1505, -0.1273, -0.1703, -0.1743, -0.1356, 0.0584, -0.1620, -0.1739, + -0.1392, -0.0388, -0.1516, -0.1481, -0.1305, 0.0142, -0.1856, -0.0622, + -0.1188, -0.3443, -0.3286, -0.1881, -0.0367, -0.0036, -0.1523, 0.0417, + -0.1569, -0.1649, -0.1732, -0.2180, -0.2371, -0.1528, -0.1854, -0.2404, + -0.1383, -0.0900, -0.0565, -0.2010, -0.1466, -0.4243, -0.1645, -0.0777, + -0.1294, -0.1713, -0.2386, -0.0283, -0.1120, -0.1037, -0.1372, -0.0540, + -0.1079, -0.0400, -0.2525, -0.1734, -0.0203, -0.1467, -0.1556, -0.1734, + -0.2649, -0.1328, 0.0190, -0.1469, -0.2624, -0.1502, -0.1802, -0.1853, + -0.1536, -0.4934, -0.2165, -0.1153, -0.2160, -0.2166, -0.2682, 0.0173], + device='cuda:0')), ('module.transform_net.bn5.running_var', tensor([0.0254, 0.0546, 0.0330, 0.0652, 0.0314, 0.0763, 0.2553, 0.0676, 0.0615, + 0.0645, 0.0638, 0.0399, 0.1777, 0.0676, 0.0717, 0.0512, 0.0324, 0.0469, + 0.0519, 0.1126, 0.0295, 0.0519, 0.0386, 0.1122, 0.0280, 0.0668, 0.0332, + 0.0748, 0.0421, 0.0866, 0.0674, 0.0842, 0.0395, 0.0392, 0.0914, 0.0774, + 0.0432, 0.0160, 0.0859, 0.1210, 0.0732, 0.0332, 0.0770, 0.0544, 0.0670, + 0.1353, 0.1493, 0.0793, 0.0847, 0.0605, 0.0314, 0.0541, 0.0475, 0.1042, + 0.0660, 0.0943, 0.0460, 0.1534, 0.1356, 0.0369, 0.0785, 0.0744, 0.1781, + 0.0273, 0.0132, 0.0425, 0.0528, 0.1415, 0.0782, 0.0158, 0.0967, 0.0795, + 0.1460, 0.0274, 0.1050, 0.0153, 0.1381, 0.0353, 0.0973, 0.0603, 0.0502, + 0.0486, 0.1513, 0.0281, 0.0801, 0.0691, 0.0311, 0.0303, 0.0491, 0.0355, + 0.0925, 0.0909, 
0.0332, 0.0476, 0.0557, 0.1230, 0.1594, 0.0273, 0.0778, + 0.0713, 0.0589, 0.1884, 0.0419, 0.0580, 0.0196, 0.0643, 0.0491, 0.0601, + 0.1227, 0.0287, 0.0809, 0.0692, 0.0406, 0.0344, 0.0483, 0.1052, 0.0721, + 0.0609, 0.0547, 0.0379, 0.0324, 0.0583, 0.0492, 0.0411, 0.0335, 0.0931, + 0.0458, 0.0559, 0.1119, 0.1099, 0.0514, 0.0619, 0.0952, 0.0711, 0.0424, + 0.0755, 0.0333, 0.0353, 0.0562, 0.0444, 0.1513, 0.0486, 0.0482, 0.0429, + 0.0424, 0.0535, 0.0545, 0.0819, 0.0213, 0.0718, 0.0538, 0.0593, 0.0507, + 0.0644, 0.0575, 0.0723, 0.0277, 0.0718, 0.0815, 0.0579, 0.1086, 0.0543, + 0.0405, 0.1576, 0.0329, 0.0607, 0.0930, 0.0444, 0.0370, 0.0340, 0.0624, + 0.0607, 0.0407, 0.0777, 0.0214, 0.1286, 0.0564, 0.0908, 0.0653, 0.0894, + 0.0306, 0.1363, 0.0820, 0.0606, 0.0366, 0.0317, 0.0451, 0.0353, 0.0900, + 0.1723, 0.0550, 0.0574, 0.0159, 0.0818, 0.0453, 0.0359, 0.0214, 0.0967, + 0.1204, 0.0106, 0.0481, 0.1105, 0.0325, 0.0649, 0.1946, 0.1088, 0.0615, + 0.0942, 0.0519, 0.0737, 0.0579, 0.1056, 0.0634, 0.0356, 0.0324, 0.0575, + 0.2472, 0.0866, 0.2049, 0.0535, 0.0375, 0.0637, 0.0361, 0.0585, 0.0477, + 0.0728, 0.0356, 0.0305, 0.0798, 0.0971, 0.0822, 0.0840, 0.0306, 0.2827, + 0.0946, 0.0849, 0.0713, 0.0623, 0.0296, 0.1018, 0.0437, 0.0680, 0.0354, + 0.0359, 0.0599, 0.0218, 0.0338, 0.0174, 0.0220, 0.1846, 0.1450, 0.0407, + 0.0597, 0.0408, 0.1304, 0.3357], device='cuda:0')), ('module.transform_net.bn5.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.transform_net.transform.weight', tensor([[-3.0557e-03, -5.3793e-04, 6.1327e-04, ..., 4.5172e-04, + 2.9316e-03, 8.8432e-03], + [-1.4762e-04, 3.2826e-04, 5.3487e-04, ..., -5.9285e-06, + -3.0186e-03, -3.9271e-03], + [ 7.1567e-04, 2.0511e-04, 7.3607e-05, ..., 4.6573e-05, + -1.1012e-03, -9.2271e-04], + ..., + [ 1.9283e-03, 1.3962e-03, 1.3491e-03, ..., -2.0274e-03, + -3.3958e-03, -2.3002e-03], + [-3.2432e-04, -5.9608e-04, -3.6762e-04, ..., 5.9462e-05, + -8.7500e-04, -4.2871e-03], + [ 5.0549e-04, -7.9500e-04, -6.4896e-04, ..., 8.4175e-04, + 
5.6262e-03, 8.0869e-03]], device='cuda:0')), ('module.transform_net.transform.bias', tensor([ 1.0557, -0.0533, -0.0246, -0.0051, 1.0118, 0.0571, 0.0161, -0.0662, + 1.2081], device='cuda:0')), ('module.bn1.weight', tensor([0.9574, 1.0045, 0.9917, 0.9858, 0.9488, 1.0210, 0.9428, 0.9855, 0.9808, + 1.0112, 0.9839, 0.9913, 0.9923, 1.0267, 0.9602, 0.9188, 0.9699, 0.9656, + 0.9738, 0.9653, 0.9604, 0.9641, 0.9548, 0.9873, 0.9911, 0.9564, 0.9748, + 0.9592, 0.9849, 0.9579, 0.9809, 0.9939, 0.9642, 0.9904, 1.0288, 1.0064, + 0.9692, 0.9552, 0.9671, 0.9583, 0.9429, 0.9853, 1.0013, 0.9701, 0.9813, + 0.9638, 0.9797, 1.0066, 1.0186, 0.9919, 0.9907, 0.9759, 0.9834, 0.9508, + 0.9867, 0.9827, 0.9874, 0.9730, 0.9319, 1.0060, 0.9537, 0.9960, 0.9770, + 0.9807], device='cuda:0')), ('module.bn1.bias', tensor([ 0.0305, 0.0291, -0.0768, 0.0243, -0.0372, -0.0718, -0.0121, -0.0137, + 0.0166, -0.0936, 0.0054, 0.0373, 0.0364, -0.0265, -0.0594, 0.0420, + 0.0227, 0.0182, 0.0428, -0.0645, 0.0070, -0.0112, -0.0146, -0.1356, + -0.1362, -0.0237, 0.0919, -0.0086, 0.0144, 0.0529, -0.0482, -0.0354, + -0.0457, -0.0564, -0.0926, 0.0037, 0.0063, -0.0396, 0.1050, -0.0602, + -0.0052, -0.0825, 0.0018, -0.0235, -0.0165, 0.0554, -0.0396, -0.1206, + -0.1034, -0.0087, -0.0356, 0.0425, -0.0442, 0.0613, -0.0266, -0.0372, + -0.0379, 0.0205, 0.0219, -0.0351, 0.0276, -0.0068, 0.0126, 0.0259], + device='cuda:0')), ('module.bn1.running_mean', tensor([ 0.4261, -0.2351, -0.0712, 0.0420, 0.0837, -0.0322, -0.7881, -0.0599, + 0.7213, -0.3721, -0.3978, 0.3686, -0.3069, -0.4192, 0.2384, -0.3626, + -0.4639, 0.3693, -0.1827, 0.2668, 0.1630, 0.0617, 0.1277, -0.6631, + 0.0885, -0.1010, 0.6567, 0.5049, 0.3823, 0.8031, 0.2140, 0.4090, + 0.1239, 0.0412, -0.2650, 0.1269, 0.4824, -0.3648, 0.1873, 0.0273, + 0.5330, -0.7423, -0.0107, -0.3586, -0.7670, 0.1593, 0.0037, -0.7477, + 0.0346, 0.1387, 0.3498, 0.2089, 0.5619, 0.3137, 0.4367, -0.0240, + -0.2615, 0.0421, 0.0293, -0.3421, -0.1336, 0.5503, 0.0372, -0.0710], + device='cuda:0')), 
('module.bn1.running_var', tensor([0.2694, 0.0716, 0.1177, 0.0739, 0.0200, 0.0286, 0.3689, 0.0699, 0.4158, + 0.2523, 0.1464, 0.1374, 0.0668, 0.0903, 0.0504, 0.0566, 0.1869, 0.2308, + 0.2683, 0.2841, 0.0777, 0.1263, 0.0258, 0.3566, 0.0876, 0.0275, 0.1828, + 0.1993, 0.2554, 0.4271, 0.0754, 0.0740, 0.0749, 0.0596, 0.0277, 0.1697, + 0.1722, 0.2911, 0.0592, 0.0659, 0.3018, 0.4561, 0.0079, 0.0923, 0.2932, + 0.0394, 0.1422, 0.3889, 0.0259, 0.0194, 0.0697, 0.0820, 0.1818, 0.1561, + 0.1527, 0.0832, 0.2142, 0.0518, 0.0353, 0.0464, 0.0632, 0.1624, 0.0737, + 0.1207], device='cuda:0')), ('module.bn1.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.bn2.weight', tensor([0.9684, 0.9667, 0.9564, 0.9479, 0.9961, 0.9625, 0.9757, 0.9456, 0.9751, + 0.9444, 0.9595, 0.9667, 0.9565, 0.9626, 0.9481, 0.9734, 0.9405, 1.0031, + 0.9477, 0.9973, 0.9932, 0.9751, 1.0060, 0.9565, 0.9827, 0.9760, 0.9665, + 0.9774, 0.9421, 0.9769, 0.8953, 0.9879, 0.9662, 0.9784, 0.9913, 0.9729, + 0.9350, 0.9587, 0.9826, 0.9308, 0.9811, 1.0205, 0.9732, 0.9390, 0.9661, + 0.9299, 0.9724, 0.9757, 0.9669, 0.9653, 0.9339, 0.9573, 0.9675, 0.9864, + 0.9635, 0.9528, 0.9712, 0.9790, 0.9489, 0.9759, 0.9842, 0.9918, 0.9847, + 1.0030], device='cuda:0')), ('module.bn2.bias', tensor([-0.0045, -0.0266, 0.0105, -0.0438, -0.0005, -0.0032, -0.0288, -0.0133, + -0.0436, -0.0505, -0.0091, -0.0085, -0.0506, -0.0435, 0.0153, -0.0320, + -0.0064, -0.0247, -0.0292, -0.0906, 0.0223, -0.0192, -0.0435, 0.0024, + -0.0325, -0.0406, -0.0094, -0.0309, -0.0023, -0.0134, -0.1212, -0.0017, + 0.0083, -0.1009, -0.0380, -0.0313, -0.0720, -0.0227, -0.0228, -0.0550, + -0.0090, -0.0688, 0.0184, -0.0256, -0.0329, -0.0270, 0.0202, -0.0070, + 0.0145, 0.0047, -0.0547, -0.0024, 0.0222, -0.0281, -0.0149, 0.0228, + 0.0315, -0.0178, -0.0680, -0.0733, -0.0047, -0.0339, -0.0558, -0.0382], + device='cuda:0')), ('module.bn2.running_mean', tensor([-0.2502, -0.2023, -0.3157, 0.0034, -0.2458, -0.3484, 0.4152, -0.1173, + -0.3223, -0.1533, 0.1601, 0.0346, 
-0.2799, 0.0175, -0.2840, 0.3348, + -0.1910, 0.7102, 0.4342, -0.3117, 0.0183, 0.1721, 0.4742, 0.0622, + -0.2099, -0.1441, 0.3306, -0.2447, -0.2162, -0.2859, 0.0899, 0.0469, + -0.0212, -0.1063, 0.2861, -0.3290, 0.0419, -0.0589, -0.3808, 0.1361, + -0.0076, 0.6340, 0.0050, -0.0847, 0.1963, -0.0973, 0.2167, -0.1371, + -0.5370, 0.0038, 0.1249, 0.1643, -0.5423, 0.3050, -0.1512, 0.1519, + -0.2116, 0.2185, -0.0201, 0.0805, -0.4099, 0.4365, 0.4842, -0.3707], + device='cuda:0')), ('module.bn2.running_var', tensor([0.1821, 0.1045, 0.0811, 0.1067, 0.0362, 0.2297, 0.2936, 0.1205, 0.1147, + 0.1005, 0.0931, 0.5177, 0.2772, 0.1406, 0.0876, 0.0800, 0.0336, 0.1262, + 0.0960, 0.0215, 0.1063, 0.0900, 0.2601, 0.1508, 0.1184, 0.1061, 0.1276, + 0.0529, 0.1534, 0.1845, 0.2355, 0.0552, 0.0430, 0.0479, 0.0703, 0.0529, + 0.0888, 0.1233, 0.1285, 0.2397, 0.0884, 0.1318, 0.1151, 0.1675, 0.0955, + 0.2266, 0.1416, 0.1275, 0.0596, 0.0734, 0.2635, 0.0623, 0.1556, 0.6597, + 0.1636, 0.1391, 0.1487, 0.1740, 0.1156, 0.0364, 0.1959, 0.1808, 0.0507, + 0.1009], device='cuda:0')), ('module.bn2.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.bn3.weight', tensor([0.9839, 0.9673, 0.9477, 0.9683, 0.9668, 0.9498, 0.9569, 0.9838, 0.9390, + 0.9897, 0.9579, 0.9353, 0.9943, 0.9474, 0.9777, 0.9801, 0.9576, 0.9584, + 0.9699, 0.9766, 0.9699, 0.9457, 0.9526, 1.0207, 1.0056, 0.9974, 0.9600, + 0.9917, 0.9797, 0.9582, 0.9479, 0.9490, 0.9719, 0.9551, 0.9849, 0.9607, + 0.9838, 0.9388, 0.9446, 0.9625, 0.9870, 0.9498, 0.9769, 0.9684, 0.9197, + 0.9597, 0.9652, 0.9739, 0.9642, 0.9494, 0.9590, 0.9842, 0.9826, 0.9470, + 1.0147, 0.9712, 0.9521, 0.9214, 0.9603, 0.9889, 1.0057, 0.9544, 0.9696, + 0.9579], device='cuda:0')), ('module.bn3.bias', tensor([-0.0331, -0.0112, 0.0057, 0.0016, -0.1097, -0.0550, 0.0029, 0.0223, + -0.0571, 0.0300, -0.0035, 0.0169, 0.0289, -0.0392, 0.0104, -0.0550, + 0.0104, 0.0113, -0.0644, 0.0012, -0.0323, -0.0068, -0.0942, 0.0448, + -0.0752, -0.1161, -0.0092, -0.0118, -0.0095, -0.0124, -0.0480, 
0.0430, + 0.0315, -0.0293, -0.0129, -0.0043, -0.0681, -0.0548, -0.0436, -0.0252, + -0.0523, 0.0081, -0.0450, -0.0149, -0.0213, -0.0878, -0.0241, -0.0552, + 0.0046, -0.0224, -0.0077, -0.0313, -0.0356, -0.0055, -0.0509, -0.0015, + -0.0171, -0.1119, 0.0044, -0.0294, 0.0841, -0.0126, -0.0042, -0.0767], + device='cuda:0')), ('module.bn3.running_mean', tensor([-0.1179, -0.2109, -0.1052, 0.1110, 0.0015, 0.0690, -0.1439, -0.2045, + -0.2490, -0.2555, 0.2528, -0.3513, -0.0930, -0.3589, 0.1332, -0.1093, + 0.0311, 0.0076, -0.3407, 0.2026, -0.0316, 0.1119, -0.1663, -0.2405, + -0.1303, 0.0465, 0.0824, -0.2038, 0.2158, 0.2773, -0.0258, -0.3768, + 0.2864, -0.3150, -0.1850, -0.4120, -0.2422, 0.3398, 0.3062, 0.1578, + 0.1047, 0.1097, -0.0118, 0.0271, -0.1421, 0.1104, -0.3231, 0.1490, + 0.3098, 0.0313, -0.0711, -0.2694, -0.0477, -0.2847, -0.2043, 0.0472, + 0.1980, -0.3076, -0.1446, -0.0578, -0.0207, 0.0126, 0.0509, 0.0559], + device='cuda:0')), ('module.bn3.running_var', tensor([0.0311, 0.0632, 0.0329, 0.0725, 0.0299, 0.2389, 0.1650, 0.1427, 0.0887, + 0.0531, 0.2355, 0.1415, 0.0939, 0.1160, 0.0877, 0.0485, 0.0715, 0.0975, + 0.0936, 0.0627, 0.0604, 0.0356, 0.0338, 0.0462, 0.0580, 0.0668, 0.0318, + 0.0964, 0.1594, 0.0955, 0.0424, 0.0889, 0.1725, 0.0734, 0.1686, 0.0895, + 0.0735, 0.0800, 0.0571, 0.1956, 0.0849, 0.0937, 0.1129, 0.1316, 0.1148, + 0.1620, 0.1119, 0.1293, 0.1508, 0.0745, 0.0389, 0.0590, 0.1822, 0.0635, + 0.0297, 0.1179, 0.0884, 0.0759, 0.0723, 0.1092, 0.0566, 0.3219, 0.0481, + 0.0495], device='cuda:0')), ('module.bn3.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.bn4.weight', tensor([0.9719, 0.9288, 0.9404, 0.8981, 0.9558, 0.8647, 0.9938, 0.9839, 0.9504, + 0.8829, 0.9852, 0.9685, 0.9392, 0.9665, 0.9397, 0.9686, 0.9511, 0.9347, + 0.9005, 0.9341, 0.9637, 0.9782, 0.9068, 0.9944, 0.8678, 0.9496, 0.8791, + 0.8913, 0.9705, 0.9599, 0.9173, 0.9419, 0.9714, 0.8662, 0.9614, 0.9636, + 0.8761, 0.9026, 0.9590, 1.0051, 0.9332, 0.9884, 0.9620, 0.9963, 0.9344, + 0.9917, 
0.9104, 0.9644, 0.9793, 0.9491, 0.9661, 0.9749, 0.9918, 0.9274, + 0.9731, 0.9629, 0.9864, 0.8924, 0.9374, 0.9265, 1.0030, 0.9332, 0.9231, + 0.9744], device='cuda:0')), ('module.bn4.bias', tensor([-0.0290, -0.0381, -0.0183, -0.0843, -0.0786, -0.0576, -0.0321, -0.0728, + -0.0004, -0.0501, -0.0512, -0.0098, -0.0458, -0.0700, -0.0562, -0.0489, + -0.0136, -0.0957, -0.2043, 0.0487, -0.0604, -0.0490, -0.1111, -0.0637, + -0.1042, -0.1490, -0.0399, -0.0834, -0.0637, -0.0709, -0.0532, -0.0383, + -0.0008, -0.0157, -0.0126, 0.0105, -0.1265, 0.0032, 0.0011, -0.0024, + -0.0607, -0.1279, -0.0568, 0.0023, 0.0132, -0.0262, -0.1053, -0.0529, + -0.0576, -0.0367, -0.1104, -0.0479, -0.0123, -0.0814, -0.0047, -0.0162, + -0.0702, -0.0519, -0.0512, -0.0542, 0.0203, -0.0377, -0.0304, -0.0392], + device='cuda:0')), ('module.bn4.running_mean', tensor([-2.7154e-01, 2.5188e-01, -2.3083e-01, -1.1752e-04, -1.6071e-01, + -1.0288e-03, -1.6060e-01, -3.1812e-01, -5.0881e-01, -2.1096e-01, + -2.6173e-01, -4.0433e-02, -2.6942e-01, -1.7432e-01, -2.6561e-01, + 1.9310e-01, 2.3981e-01, 1.3317e-02, -4.9114e-01, -2.9459e-01, + 1.8738e-01, -3.9850e-02, -4.4220e-01, 6.4368e-03, 1.0282e-01, + -1.7609e-01, -1.8123e-01, 5.2415e-02, 2.9093e-01, -1.0226e-01, + 2.5449e-01, 7.9838e-02, -1.5610e-01, -3.8982e-01, -1.1930e-01, + -7.5275e-02, -1.0730e-01, 6.2043e-02, 1.9573e-02, -4.8201e-01, + -8.2139e-02, -1.5754e-01, -2.3653e-01, -2.1203e-01, -2.7148e-01, + -1.2872e-02, 2.1902e-01, -6.1971e-02, 2.5442e-01, -9.4199e-02, + 8.5456e-02, -3.1384e-01, -5.2701e-02, -1.8283e-02, 9.5014e-05, + -3.0140e-02, -1.3224e-01, -1.0458e-02, 3.2202e-03, -2.1161e-01, + -3.2415e-02, 1.9768e-01, -1.9820e-03, -6.1268e-02], device='cuda:0')), ('module.bn4.running_var', tensor([0.0775, 0.1517, 0.1208, 0.1465, 0.1560, 0.1655, 0.1435, 0.0978, 0.0970, + 0.0966, 0.0883, 0.0750, 0.2008, 0.0495, 0.1411, 0.2070, 0.3223, 0.2494, + 0.0689, 0.0628, 0.1408, 0.2248, 0.1027, 0.0535, 0.1308, 0.0686, 0.2316, + 0.2367, 0.1280, 0.0964, 0.1228, 0.1370, 0.0837, 
0.0489, 0.1195, 0.0963, + 0.1629, 0.3504, 0.2324, 0.0411, 0.0992, 0.0932, 0.1617, 0.0878, 0.2154, + 0.1070, 0.2407, 0.2999, 0.0624, 0.1566, 0.1196, 0.0817, 0.1615, 0.2499, + 0.1819, 0.1056, 0.0507, 0.2700, 0.0885, 0.1793, 0.1004, 0.0950, 0.2143, + 0.1332], device='cuda:0')), ('module.bn4.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.bn5.weight', tensor([0.9956, 0.8218, 0.9660, 0.8502, 0.9757, 0.9295, 0.9911, 0.9380, 1.0068, + 0.8971, 0.9877, 0.9681, 0.8669, 0.9044, 0.9771, 0.9356, 0.9960, 0.9869, + 0.9540, 0.8842, 0.8905, 0.9738, 0.8970, 0.9943, 0.9446, 1.0118, 0.8025, + 0.9723, 0.9274, 0.8401, 0.9620, 0.9956, 0.9676, 0.9358, 1.0078, 0.8527, + 0.9738, 0.9063, 0.8941, 0.9772, 0.7482, 0.9677, 0.8094, 0.7970, 0.9199, + 0.9855, 1.0018, 0.9199, 0.9615, 0.9797, 0.9876, 0.9846, 0.9892, 0.9019, + 0.8533, 0.8955, 0.9516, 0.9545, 0.9719, 0.8642, 1.0412, 0.9918, 1.0091, + 1.0158], device='cuda:0')), ('module.bn5.bias', tensor([-1.7646e-02, -4.8253e-02, -2.8858e-02, -4.7392e-02, -7.9532e-02, + -3.0752e-02, -9.9099e-02, -2.5154e-02, -1.1934e-01, -3.5847e-02, + -7.5621e-02, -8.6020e-02, -3.6303e-02, -6.6361e-02, -5.8640e-02, + -1.0676e-01, -1.1631e-01, -1.3472e-01, -9.7941e-02, -9.1114e-02, + -1.9563e-02, -2.5152e-01, -7.3845e-02, -9.8564e-02, -4.4096e-02, + -1.1259e-01, -2.3151e-02, -1.2702e-01, -3.4798e-02, -1.9918e-02, + -6.9000e-02, -1.7100e-01, -1.5731e-01, -6.0053e-05, -1.2194e-01, + -3.8141e-02, -1.7316e-01, -5.8246e-02, -8.9738e-02, -1.2313e-01, + -4.2175e-02, -1.1261e-01, -1.0774e-01, -6.2062e-02, -9.0335e-02, + -1.6811e-01, -1.0884e-01, -6.7475e-02, -1.1029e-02, 1.2906e-02, + -4.5145e-02, -1.1859e-01, -3.8917e-02, -5.6922e-02, -3.2910e-02, + -6.9608e-02, -3.5666e-02, -4.6286e-02, -1.1235e-01, -5.8298e-02, + -1.1326e-01, -1.2617e-01, -6.3157e-02, -8.4112e-03], device='cuda:0')), ('module.bn5.running_mean', tensor([-0.1671, 0.1362, 0.1261, -0.0208, 0.1527, 0.2063, -0.2192, 0.0798, + -0.1073, 0.0608, -0.2205, 0.1398, -0.3201, 0.2858, -0.1489, -0.0047, + 
0.1180, 0.1311, 0.3355, -0.2368, -0.2183, -0.1878, 0.6287, 0.0602, + -0.0263, 0.0246, -0.0975, -0.3889, -0.4703, 0.1642, -0.2359, -0.4039, + -0.1448, 0.1513, -0.0390, -0.0513, 0.2277, 0.2182, 0.1174, 0.0410, + 0.1761, 0.0427, -0.0545, -0.1477, -0.1921, -0.1408, 0.1446, 0.0492, + -0.3001, -0.0347, 0.1064, 0.1110, 0.0973, -0.2868, -0.0742, 0.0385, + 0.1662, -0.0487, 0.1617, 0.2526, -0.3219, -0.0930, 0.3640, -0.4084], + device='cuda:0')), ('module.bn5.running_var', tensor([0.0803, 0.0675, 0.0971, 0.1109, 0.0468, 0.0587, 0.0560, 0.0595, 0.0618, + 0.0459, 0.0902, 0.0550, 0.0831, 0.0709, 0.1072, 0.0885, 0.0780, 0.0793, + 0.0913, 0.0720, 0.0550, 0.0554, 0.0756, 0.0593, 0.0704, 0.0716, 0.0796, + 0.0522, 0.0605, 0.1511, 0.0628, 0.0814, 0.1140, 0.0964, 0.0593, 0.0558, + 0.0378, 0.0449, 0.0545, 0.0527, 0.0319, 0.0490, 0.0390, 0.0524, 0.0928, + 0.1487, 0.1069, 0.0375, 0.1076, 0.0694, 0.0599, 0.0765, 0.0604, 0.0294, + 0.0531, 0.0723, 0.0891, 0.0805, 0.0483, 0.0379, 0.0725, 0.0848, 0.0763, + 0.0467], device='cuda:0')), ('module.bn5.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.bn6.weight', tensor([0.3100, 0.1241, 0.1257, ..., 0.2415, 0.1580, 0.3044], device='cuda:0')), ('module.bn6.bias', tensor([-7.5920e-09, -1.2804e-08, 2.9380e-09, ..., 1.4118e-09, + -5.5393e-09, -1.4208e-08], device='cuda:0')), ('module.bn6.running_mean', tensor([ 0.1350, -0.0502, -0.2535, ..., -0.0404, -0.0691, 0.1088], + device='cuda:0')), ('module.bn6.running_var', tensor([0.0224, 0.0177, 0.0538, ..., 0.0379, 0.0629, 0.0546], device='cuda:0')), ('module.bn6.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.bn7.weight', tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], device='cuda:0')), ('module.bn7.bias', tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + device='cuda:0')), ('module.bn7.running_mean', tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + device='cuda:0')), ('module.bn7.running_var', tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], device='cuda:0')), ('module.bn7.num_batches_tracked', tensor(0, device='cuda:0')), ('module.bn8.weight', tensor([0.5975, 0.6336, 0.6173, 0.6104, 0.6317, 0.6728, 0.5002, 0.6813, 0.4865, + 0.5985, 0.6397, 0.6842, 0.5258, 0.5911, 0.5789, 0.5546, 0.5426, 0.6505, + 0.6261, 0.5844, 0.7298, 0.5456, 0.5810, 0.6098, 0.5354, 0.5674, 0.5305, + 0.5879, 0.6743, 0.6612, 0.5703, 0.6846, 0.5236, 0.4663, 0.5830, 0.6295, + 0.6389, 0.6141, 0.6196, 0.5281, 0.5320, 0.5720, 0.5801, 0.5960, 0.4653, + 0.6481, 0.6471, 0.5226, 0.5663, 0.5829, 0.6810, 0.5656, 0.5270, 0.5450, + 0.6240, 0.4726, 0.6167, 0.5670, 0.5743, 0.6302, 0.6045, 0.5235, 0.6135, + 0.6238, 0.6437, 0.6064, 0.6152, 0.6007, 0.5392, 0.5754, 0.5522, 0.5770, + 0.5522, 0.5666, 0.6488, 0.6184, 0.6450, 0.6074, 0.6663, 0.6806, 0.5845, + 0.5674, 0.4125, 0.6113, 0.5152, 0.5826, 0.5761, 0.5295, 0.6270, 0.6220, + 0.6417, 0.5565, 0.5432, 0.5742, 0.6486, 0.5560, 0.5912, 0.4814, 0.6064, + 0.5354, 0.5963, 0.5535, 0.6901, 0.4843, 0.6334, 0.5833, 0.6275, 0.6893, + 0.5902, 0.4670, 0.5237, 0.5259, 0.6049, 0.5312, 0.5465, 0.5641, 0.5297, + 0.6019, 0.6115, 0.5208, 0.5313, 0.5721, 0.7261, 0.5799, 0.5644, 
0.6148, + 0.6433, 0.6579, 0.5728, 0.6637, 0.4920, 0.5348, 0.6054, 0.6967, 0.6347, + 0.6901, 0.5789, 0.5934, 0.6175, 0.5708, 0.5831, 0.5093, 0.5488, 0.6062, + 0.5265, 0.4917, 0.5884, 0.5979, 0.5923, 0.6651, 0.5131, 0.5750, 0.5136, + 0.5866, 0.6611, 0.6533, 0.5531, 0.6480, 0.5213, 0.5444, 0.4909, 0.6320, + 0.6691, 0.6666, 0.5485, 0.5622, 0.6017, 0.6123, 0.5685, 0.6451, 0.6198, + 0.6232, 0.6251, 0.5639, 0.6279, 0.6657, 0.6848, 0.5567, 0.5818, 0.5247, + 0.5614, 0.6785, 0.5420, 0.6355, 0.5918, 0.5717, 0.5690, 0.6593, 0.5990, + 0.6394, 0.5861, 0.5881, 0.6242, 0.5771, 0.6306, 0.6191, 0.6725, 0.6106, + 0.5701, 0.5729, 0.6371, 0.5420, 0.5653, 0.4356, 0.6391, 0.5974, 0.6171, + 0.4873, 0.5479, 0.6097, 0.5093, 0.5626, 0.5206, 0.5711, 0.6823, 0.4830, + 0.6145, 0.5612, 0.5915, 0.6442, 0.6279, 0.5503, 0.5646, 0.5565, 0.6741, + 0.4962, 0.6118, 0.5162, 0.5619, 0.6170, 0.5969, 0.5109, 0.5657, 0.6046, + 0.5199, 0.5501, 0.5052, 0.5363, 0.5820, 0.5483, 0.5127, 0.5824, 0.6109, + 0.6180, 0.5710, 0.5554, 0.7066, 0.5692, 0.7009, 0.6320, 0.6670, 0.5894, + 0.6487, 0.6074, 0.5676, 0.4965], device='cuda:0')), ('module.bn8.bias', tensor([-2.4603e-01, -2.0420e-01, -2.3926e-01, -1.8113e-01, 3.6277e-03, + -3.4310e-02, -4.5211e-02, -1.3239e-01, -3.1027e-01, -2.1496e-01, + -1.8401e-01, -1.6175e-01, -1.6365e-01, -1.8879e-01, -2.0187e-01, + -2.2995e-01, -1.9215e-01, 1.7450e-02, -1.4591e-01, -9.0315e-03, + -6.4923e-02, -1.9039e-02, -6.7852e-02, -1.6543e-01, -5.1770e-01, + -2.6707e-01, -5.8962e-02, -1.8673e-01, -8.9573e-02, -8.6647e-02, + -9.3134e-03, -1.1019e-01, -4.8615e-02, 1.3133e-02, -2.2098e-01, + -1.2943e-01, -1.5607e-01, -3.7629e-02, -2.3344e-01, -2.0304e-01, + -9.4395e-02, -3.2505e-02, -9.0034e-02, -2.5953e-01, -3.2737e-01, + -2.5741e-01, -6.1232e-02, 3.1437e-03, 1.3686e-02, -1.5474e-01, + -1.4603e-01, -7.8823e-02, -2.3128e-01, -1.9212e-02, -5.2570e-01, + -1.2511e-02, -1.4340e-01, 1.1813e-02, 9.9758e-04, -1.9165e-01, + -3.2000e-01, 4.9402e-02, -1.5039e-01, -1.2290e-01, -1.0617e-01, + 
-4.6460e-01, 1.0833e-02, -2.1422e-01, -2.6868e-01, -1.3662e-01, + -6.2994e-03, -2.9373e-01, -2.4041e-01, -3.6354e-01, -2.7847e-01, + -1.8632e-01, -2.4994e-01, -1.2377e-02, -1.7033e-01, -1.0021e-01, + -6.9008e-02, -4.1822e-01, -4.7919e-04, -1.1401e-01, -9.0716e-03, + -1.8538e-01, -9.9036e-03, -6.5337e-02, -1.0455e-01, -2.3924e-01, + -2.0551e-01, 1.7212e-02, -2.5079e-01, 2.5922e-02, -1.8929e-01, + -3.2034e-02, -2.0613e-01, -3.7895e-01, -5.2031e-02, -1.0013e-01, + -9.6603e-02, -1.0561e-01, -1.2527e-01, -3.1434e-02, -3.8905e-01, + -1.4895e-01, -1.1154e-01, -1.0705e-01, -1.2938e-01, -5.0385e-03, + -1.1143e-01, -9.2698e-02, -1.1573e-01, -1.6283e-01, 2.4557e-02, + -3.4486e-01, -4.5515e-01, -2.3260e-01, -1.4217e-01, -2.0953e-01, + -2.5524e-01, -4.7827e-01, -1.2062e-01, -1.4182e-01, -3.8212e-02, + -1.2130e-01, 1.2211e-02, -1.3204e-01, 1.1787e-02, -2.8765e-01, + -1.0789e-01, -7.5041e-02, -1.9882e-01, 1.9179e-02, -1.8313e-01, + -1.2570e-01, -4.3861e-02, -1.4641e-01, -2.7835e-01, -1.2664e-01, + 3.1255e-02, -2.2323e-01, 5.9589e-02, -3.2981e-02, -8.7146e-02, + -1.5633e-02, -2.4376e-01, -1.6883e-01, -3.1566e-02, -1.0349e-01, + -5.2299e-01, -1.4859e-01, -4.2795e-01, -3.1998e-03, 2.6797e-03, + -1.8901e-01, -3.8608e-02, 2.6671e-02, -1.0715e-02, -2.2606e-01, + -5.4738e-01, -1.1043e-01, -7.5873e-02, -1.1971e-01, -5.6222e-02, + 1.6415e-02, -2.0531e-01, -1.8289e-01, -2.2948e-01, 3.4413e-03, + -1.9205e-03, -2.1054e-01, -1.8685e-01, -3.4275e-01, -1.8347e-01, + -1.3209e-01, -1.1491e-01, -5.9706e-02, -2.8571e-01, 2.4211e-03, + -2.6390e-01, -1.6340e-01, -2.9284e-01, -1.1333e-01, -1.1147e-01, + -2.0640e-01, -1.0543e-01, -1.1463e-01, -4.9580e-01, -2.6341e-03, + -3.7357e-02, -2.0058e-02, -7.4550e-02, -2.1391e-01, -1.3184e-01, + -1.4010e-01, -1.5889e-01, -3.5747e-01, -4.4569e-01, -3.3433e-01, + -1.4844e-01, -6.5637e-02, -1.7807e-03, 2.9938e-02, -1.7752e-01, + -1.8577e-01, -5.2774e-02, -4.4365e-01, -1.5993e-03, -2.5719e-01, + 8.6095e-03, -1.8116e-01, 9.6789e-03, -6.1000e-02, -6.5450e-02, + 
-8.2867e-02, -1.5450e-01, -5.1060e-03, -4.4363e-01, -2.9718e-01, + -1.0969e-01, 3.3728e-02, 1.0786e-02, 3.0184e-02, -7.2835e-02, + -2.5034e-01, -2.2120e-01, -2.9737e-02, -1.2910e-01, -2.3450e-01, + -1.5038e-01, -2.2467e-02, -2.3203e-01, -4.2806e-01, -1.4618e-01, + 8.2870e-02, 3.8532e-02, -1.4712e-02, -5.2360e-02, -2.1458e-01, + -1.8515e-01, -9.4737e-02, -2.1847e-01, -1.4109e-01, -2.9911e-02, + -2.3604e-01, -1.5992e-01, -1.9666e-01, -1.5877e-01, -1.6443e-01, + -2.1488e-01, -2.3350e-01, 2.9479e-02, -3.1726e-01, 4.1851e-02, + -2.6533e-01], device='cuda:0')), ('module.bn8.running_mean', tensor([-2.0283e+00, 4.0840e-02, 3.5831e-03, -6.7090e-02, 5.9631e-01, + -4.8755e-01, -9.9364e-01, -8.5954e-03, 2.9753e-01, -5.4256e-01, + -4.1445e-01, -9.8848e-01, -9.7389e-02, -5.0073e-01, -4.5812e-01, + -6.7543e-01, -8.2462e-01, -1.2276e+00, 1.2158e-01, -6.1915e-01, + 4.7943e-01, -7.9419e-01, 2.5277e-01, -5.8727e-01, -8.2322e-01, + -5.7813e-01, 2.0851e-01, -8.3000e-01, -1.4634e+00, -8.6428e-02, + -9.0040e-01, -1.2791e+00, -1.8846e+00, -5.8082e-01, -5.0797e-01, + -1.1480e+00, 2.5509e-01, -5.7088e-01, -6.3888e-01, -1.6245e+00, + 6.8897e-01, 2.0542e+00, 4.1608e-01, 4.0017e-02, -4.8262e-01, + 1.7681e-01, -1.5150e+00, -5.6165e-01, 1.1971e-01, -4.9809e-01, + -2.7745e-01, -4.5986e-02, 9.1302e-01, 7.3921e-01, 1.1986e-01, + 5.7101e-01, 8.3916e-01, -7.2217e-01, -8.8311e-02, 8.9938e-01, + -8.5279e-01, -9.8137e-01, -1.2328e+00, -2.1072e-02, 1.3524e-01, + -8.6072e-01, 7.3041e-01, 1.9557e-01, -1.7824e+00, -4.7073e-01, + -2.0247e-01, -2.0033e+00, -3.9386e-01, -2.1243e+00, -6.5067e-01, + -7.1110e-01, -1.4061e-01, 2.2077e+00, -4.1148e-01, -5.9354e-01, + -1.7983e-01, -3.1042e-02, -4.5746e-01, -6.5795e-01, 4.1195e-01, + 1.1041e+00, 7.0968e-01, 5.2546e-02, 1.7824e-01, -1.3639e+00, + 2.2819e-01, 1.5307e+00, -4.4448e-01, 2.5587e-01, -5.7763e-01, + -9.7971e-01, 7.9011e-01, -7.9087e-01, 1.0300e+00, -1.8775e+00, + 7.8660e-01, 7.5173e-01, 1.0607e-01, 3.7644e-01, -3.3142e-01, + -1.2753e+00, 5.8203e-01, 
2.6286e-01, -6.5150e-01, -1.5330e+00, + -9.6321e-01, -6.4370e-01, 4.2368e-01, 4.9135e-01, -9.7401e-01, + -3.6962e-01, -9.7116e-01, -5.9903e-01, -2.5891e-01, -9.5421e-01, + -1.1445e+00, 9.6036e-02, 1.1855e-01, -1.3633e+00, 5.1252e-03, + 3.7890e-02, 1.3806e+00, -3.6376e-01, -1.0937e-01, -1.1760e-01, + -2.5344e-01, -3.7274e-01, -1.4159e-01, 2.3785e-01, 2.0710e-01, + -8.8444e-01, 3.0720e-01, -6.4719e-01, 5.4683e-01, -1.8802e-01, + 4.3419e-01, -8.1701e-01, 4.0321e-02, 7.1071e-01, -1.8930e+00, + 1.0860e+00, 1.0797e+00, -1.5746e-01, 6.8257e-01, -7.0189e-02, + -1.1446e+00, -5.0258e-01, -2.9839e-01, -2.4193e-01, -7.6033e-01, + -1.3785e+00, -1.0725e+00, 4.0829e-01, -1.6946e+00, 4.4058e-01, + -5.8764e-01, -2.0746e-01, 6.0239e-01, 7.3826e-01, 1.0900e+00, + -7.1778e-02, 2.8387e-01, -7.5113e-01, 2.7873e-01, 3.2644e-01, + -1.3364e+00, 1.0656e+00, -5.9139e-04, -2.8503e-01, -6.4286e-01, + -6.1208e-02, 1.0951e+00, -3.6838e-01, -2.1318e-01, 1.8973e-01, + -2.0989e-01, -2.3763e-01, -1.0747e+00, -1.9353e+00, 8.7215e-01, + 1.0256e+00, -1.7362e-01, 7.3548e-01, 1.4111e-01, -5.5416e-01, + 2.4390e-01, 6.4423e-02, -2.1135e-01, -9.8368e-01, -2.9405e-01, + -1.6668e+00, 4.6341e-01, 1.2250e-01, -5.5501e-01, -4.7055e-01, + 2.1411e-01, -3.3290e-01, -1.1014e+00, -5.0471e-01, 2.8561e-02, + 7.3296e-01, 4.8726e-01, -7.1554e-01, -3.3608e-01, 2.3547e-02, + 3.0119e-01, 1.4341e+00, 1.3812e+00, 4.6313e-01, -4.3285e-01, + -1.3715e+00, -8.1076e-01, 5.3997e-01, 2.6896e-01, -4.9527e-01, + 6.2146e-01, 1.7119e-01, 1.2711e+00, 1.5919e-01, -8.3132e-02, + -4.0712e-01, -4.5849e-01, 1.1198e+00, -6.1305e-01, 3.2838e-01, + -9.5643e-01, 6.9190e-01, -5.2347e-01, -7.4339e-01, -4.6691e-02, + 4.7998e-01, 7.6136e-01, 5.9913e-01, -1.0608e+00, -4.2126e-01, + 1.0778e+00, -9.3157e-02, -8.1845e-01, 1.4165e+00, 8.8506e-02, + 1.1951e+00, -4.6053e-01, -2.9598e-01, -1.0353e+00, -1.5638e-01, + -1.8602e+00, -5.0030e-01, 5.7830e-01, -8.2748e-01, 2.0043e+00, + 5.3103e-01], device='cuda:0')), ('module.bn8.running_var', tensor([0.0531, 
0.0432, 0.0521, 0.0578, 0.0544, 0.1228, 0.0397, 0.0574, 0.0540, + 0.0353, 0.0486, 0.0607, 0.0464, 0.0311, 0.0592, 0.0557, 0.0396, 0.1498, + 0.0525, 0.0540, 0.0617, 0.0512, 0.0305, 0.0860, 0.0515, 0.0524, 0.0268, + 0.0376, 0.0653, 0.0307, 0.0265, 0.0631, 0.0277, 0.0501, 0.0538, 0.0192, + 0.0615, 0.0782, 0.0537, 0.0363, 0.0451, 0.0399, 0.0557, 0.0400, 0.0294, + 0.0567, 0.0579, 0.1004, 0.0529, 0.0451, 0.0497, 0.0233, 0.0189, 0.0488, + 0.0291, 0.0481, 0.0389, 0.0589, 0.1375, 0.0415, 0.0612, 0.0822, 0.0428, + 0.0750, 0.0676, 0.0271, 0.0852, 0.0576, 0.0364, 0.0334, 0.0775, 0.0351, + 0.0446, 0.0337, 0.0268, 0.0629, 0.0577, 0.0316, 0.0426, 0.0366, 0.0229, + 0.0472, 0.0377, 0.0285, 0.0340, 0.0441, 0.0464, 0.0681, 0.0387, 0.0529, + 0.0468, 0.0719, 0.0278, 0.0578, 0.0831, 0.0804, 0.0478, 0.0312, 0.0413, + 0.0524, 0.0464, 0.0389, 0.0563, 0.0600, 0.0591, 0.0623, 0.0295, 0.0622, + 0.0871, 0.0434, 0.0277, 0.0362, 0.0418, 0.0544, 0.0648, 0.0218, 0.0488, + 0.0461, 0.0447, 0.0559, 0.0497, 0.0410, 0.0552, 0.0600, 0.0467, 0.0510, + 0.0381, 0.0729, 0.0492, 0.0515, 0.0356, 0.0420, 0.0549, 0.0699, 0.0372, + 0.0558, 0.0743, 0.0211, 0.0498, 0.0356, 0.0646, 0.0355, 0.0637, 0.0411, + 0.0594, 0.0658, 0.0510, 0.0407, 0.0505, 0.0620, 0.0300, 0.0388, 0.0370, + 0.0841, 0.0858, 0.0373, 0.0341, 0.0533, 0.0586, 0.0344, 0.0270, 0.0442, + 0.0468, 0.0625, 0.0669, 0.0506, 0.0471, 0.0544, 0.0556, 0.0486, 0.0648, + 0.0298, 0.0470, 0.0516, 0.0375, 0.0649, 0.0427, 0.0288, 0.0229, 0.0337, + 0.0414, 0.0376, 0.0232, 0.0776, 0.0408, 0.0398, 0.0542, 0.0589, 0.0580, + 0.0441, 0.0236, 0.0538, 0.0249, 0.0447, 0.0541, 0.0801, 0.0758, 0.0473, + 0.0350, 0.0553, 0.0460, 0.0670, 0.0355, 0.0767, 0.0479, 0.0364, 0.0422, + 0.0298, 0.0358, 0.0430, 0.0960, 0.0486, 0.1065, 0.0305, 0.0647, 0.0649, + 0.0557, 0.0524, 0.0365, 0.0608, 0.0745, 0.0569, 0.0722, 0.0612, 0.0521, + 0.0402, 0.0585, 0.0555, 0.0645, 0.0441, 0.0250, 0.0323, 0.0500, 0.0489, + 0.0326, 0.0645, 0.0778, 0.0453, 0.0312, 0.0514, 0.0353, 0.0339, 0.0518, + 0.0480, 
0.0332, 0.0429, 0.0569, 0.0477, 0.0427, 0.0768, 0.0333, 0.0448, + 0.0502, 0.0633, 0.0474, 0.0440], device='cuda:0')), ('module.bn8.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.bn9.weight', tensor([0.6226, 0.5779, 0.7111, 0.6101, 0.7459, 0.7202, 0.6411, 0.5684, 0.7304, + 0.5505, 0.5243, 0.6461, 0.6926, 0.6356, 0.5848, 0.6103, 0.7593, 0.6569, + 0.5361, 0.5942, 0.5781, 0.5581, 0.5624, 0.5988, 0.4425, 0.5700, 0.6438, + 0.6012, 0.5232, 0.6162, 0.6568, 0.5419, 0.6366, 0.6227, 0.4681, 0.6104, + 0.6165, 0.5403, 0.7192, 0.6316, 0.5093, 0.7578, 0.7485, 0.5667, 0.6746, + 0.6822, 0.5845, 0.6114, 0.6166, 0.7158, 0.5597, 0.5891, 0.6213, 0.5997, + 0.5359, 0.5449, 0.6071, 0.6615, 0.6150, 0.5765, 0.5491, 0.6936, 0.6118, + 0.6273, 0.6168, 0.5931, 0.7683, 0.7403, 0.5190, 0.6243, 0.7413, 0.6641, + 0.5093, 0.5727, 0.5620, 0.5876, 0.6844, 0.6052, 0.5821, 0.5480, 0.5626, + 0.5544, 0.6318, 0.6014, 0.7053, 0.7174, 0.5355, 0.5201, 0.5152, 0.5391, + 0.6946, 0.5794, 0.5174, 0.5422, 0.6392, 0.6332, 0.5782, 0.7359, 0.6275, + 0.5356, 0.6401, 0.5545, 0.5097, 0.5167, 0.6474, 0.7017, 0.5151, 0.6150, + 0.5057, 0.7647, 0.5022, 0.5596, 0.6011, 0.5422, 0.5353, 0.5854, 0.5972, + 0.6718, 0.6263, 0.5183, 0.5773, 0.6467, 0.5548, 0.6688, 0.5383, 0.7225, + 0.5821, 0.8546, 0.6255, 0.6119, 0.7325, 0.5714, 0.6477, 0.4745, 0.7224, + 0.6924, 0.6096, 0.6536, 0.5273, 0.7535, 0.6486, 0.4737, 0.6451, 0.5303, + 0.7275, 0.6395, 0.5261, 0.4678, 0.6351, 0.5788, 0.6072, 0.7128, 0.6788, + 0.5757, 0.5632, 0.7167, 0.6581, 0.6026, 0.5152, 0.6542, 0.5238, 0.5934, + 0.5098, 0.5474, 0.5955, 0.5794, 0.5691, 0.6741, 0.7431, 0.5605, 0.7425, + 0.4771, 0.6316, 0.5784, 0.6175, 0.5925, 0.7595, 0.4885, 0.6532, 0.7380, + 0.5513, 0.7149, 0.6233, 0.6725, 0.5737, 0.6078, 0.5994, 0.7083, 0.6195, + 0.5393, 0.5609, 0.8236, 0.5870, 0.5723, 0.5348, 0.5125, 0.6559, 0.7676, + 0.5725, 0.5361, 0.6094, 0.5743, 0.5731, 0.4321, 0.5596, 0.5669, 0.5459, + 0.6645, 0.5306, 0.5926, 0.5306, 0.5523, 0.5735, 0.6918, 0.5582, 0.5910, + 0.6099, 
0.5703, 0.5421, 0.4958, 0.5488, 0.7844, 0.6439, 0.7709, 0.6108, + 0.5091, 0.7249, 0.7102, 0.5906, 0.5033, 0.6028, 0.5491, 0.6216, 0.6770, + 0.6140, 0.5667, 0.6594, 0.6803, 0.5505, 0.6597, 0.6188, 0.6077, 0.5960, + 0.5762, 0.5890, 0.5766, 0.7170, 0.6740, 0.7188, 0.7555, 0.5970, 0.5619, + 0.6517, 0.7393, 0.5757, 0.5051], device='cuda:0')), ('module.bn9.bias', tensor([-0.2170, -0.0310, -0.0988, 0.0487, -0.0312, -0.0130, -0.0537, -0.0426, + 0.0014, -0.0416, -0.0396, -0.0725, -0.0075, -0.0937, -0.1644, -0.0798, + -0.1929, 0.0067, -0.0787, 0.0544, -0.0484, -0.1948, -0.0049, 0.0090, + -0.1226, -0.0614, -0.0773, -0.0869, 0.0276, -0.0051, -0.1750, -0.1782, + 0.0702, 0.0293, 0.0192, -0.0831, -0.0519, -0.0622, -0.0310, 0.0473, + 0.0115, -0.0072, -0.0604, 0.0569, 0.0387, -0.0205, -0.0636, 0.0378, + -0.1546, -0.0526, -0.1494, 0.0343, -0.0199, 0.0012, 0.0051, -0.0014, + -0.1953, 0.0316, -0.1846, 0.0519, -0.0836, -0.0976, -0.0880, 0.1005, + -0.0331, -0.1253, 0.0254, 0.0263, -0.1414, -0.0912, -0.0212, -0.0592, + -0.3242, -0.0332, -0.0921, -0.3251, -0.0107, -0.2413, -0.0921, -0.0573, + -0.0654, -0.2153, 0.0033, -0.2458, 0.0437, -0.1673, 0.0677, -0.2057, + 0.0821, -0.0131, 0.0227, -0.1236, -0.2385, -0.0143, -0.0123, -0.0346, + -0.0052, -0.1061, 0.0841, -0.0014, 0.0377, -0.2477, -0.0790, -0.1276, + -0.0130, 0.0166, -0.3100, -0.0152, -0.1520, -0.0073, 0.0600, -0.2974, + 0.0602, -0.0320, -0.0776, -0.1355, 0.0946, -0.0443, -0.0232, -0.3531, + 0.0545, 0.0883, -0.0968, -0.1333, -0.3653, 0.0034, 0.0171, 0.0065, + -0.0617, 0.0243, -0.0501, -0.2803, -0.2336, 0.0566, -0.0109, -0.0022, + 0.0525, -0.1713, -0.2902, 0.0135, -0.2197, 0.0879, 0.0112, -0.0573, + -0.1882, 0.0200, -0.1664, -0.0685, 0.0494, 0.0616, 0.0487, 0.0080, + 0.0259, 0.0402, -0.1211, -0.0805, -0.1758, 0.0028, -0.2267, -0.2386, + -0.0307, -0.0068, -0.1553, -0.0744, 0.0674, -0.1869, -0.0832, 0.0753, + 0.0245, -0.0422, -0.0455, -0.1663, 0.0308, -0.0209, -0.2745, -0.2819, + -0.0409, -0.0598, 0.0402, -0.0241, 0.0534, 0.0685, -0.2451, 
0.0352, + -0.1865, -0.0997, 0.1086, -0.1171, -0.0567, -0.0142, -0.1399, 0.0151, + -0.1117, -0.0816, -0.3181, -0.0265, 0.0528, 0.0076, -0.1335, -0.0827, + 0.0046, -0.0197, 0.0757, -0.0917, 0.0098, 0.0395, -0.0686, -0.0401, + -0.0138, 0.0804, 0.0633, -0.2460, -0.0473, 0.0353, -0.0249, -0.0243, + 0.0711, 0.0348, 0.0430, -0.1605, -0.0386, -0.0667, -0.2364, 0.0509, + -0.2598, -0.2726, 0.0156, -0.0554, -0.0502, -0.1221, 0.0201, -0.0576, + 0.0900, -0.1347, 0.0304, -0.0324, -0.1809, -0.1486, 0.0712, -0.0160, + -0.1753, -0.0975, -0.0247, 0.0352, -0.0621, 0.0609, -0.0325, 0.0135, + 0.0200, 0.0407, -0.2593, -0.0471, -0.2215, 0.0491, 0.0535, -0.2491], + device='cuda:0')), ('module.bn9.running_mean', tensor([-3.4690e-01, -3.1970e-02, -1.2713e-01, 7.8237e-02, -2.1373e-02, + -5.7925e-02, 1.2020e-03, -1.5022e-01, -8.9048e-02, -1.0673e-02, + -1.4895e-01, -9.6178e-02, -3.6889e-02, -7.7319e-02, -3.4310e-01, + -1.2211e-01, -2.3033e-01, -1.3725e-01, -1.6090e-01, 1.8006e-02, + -9.4403e-02, -1.8156e-01, -1.3622e-01, -5.9155e-02, -2.3709e-01, + -1.2678e-01, -1.4457e-03, -6.3109e-02, -1.2476e-02, 2.9832e-02, + -3.7519e-01, -3.6943e-01, 6.4961e-03, 1.0919e-02, -1.8239e-01, + -1.3789e-01, -2.5692e-01, -1.8134e-01, -8.6468e-02, -1.8330e-02, + -1.2256e-01, 1.1145e-01, -1.3317e-01, -4.4275e-02, -1.0242e-01, + -1.6042e-01, -1.2620e-01, -8.1089e-03, -2.7272e-01, -2.0920e-01, + -3.7624e-01, -5.3083e-02, -7.9922e-02, -2.4579e-01, -4.1349e-02, + -1.8715e-02, -4.6828e-01, -3.6632e-02, -3.1954e-01, -4.5855e-02, + -2.7011e-01, -2.5727e-01, -9.7844e-02, 2.4020e-01, -8.8557e-02, + -3.5869e-01, 1.3651e-01, -1.1983e-02, -2.7594e-01, -2.3819e-01, + -6.1687e-02, -2.3113e-01, -5.0003e-01, -2.2050e-01, -6.8881e-02, + -4.6418e-01, -9.7993e-02, -5.0303e-01, -1.0902e-01, -1.1366e-01, + -1.1766e-01, -4.4700e-01, -5.7806e-02, -3.4928e-01, -6.2481e-03, + -2.0928e-01, 1.7405e-01, -3.4542e-01, 1.0241e-01, -3.7274e-02, + -6.1344e-02, -1.9960e-01, -4.2360e-01, -1.0810e-01, -1.0010e-01, + -1.2489e-01, -5.3978e-02, 
-2.2705e-01, 1.6446e-02, -2.5721e-02, + 6.0314e-03, -3.6459e-01, -1.8675e-01, -2.8506e-01, 3.6109e-03, + -1.1740e-01, -4.9686e-01, -1.3097e-01, -3.5433e-01, -6.1953e-02, + 5.9330e-03, -4.9691e-01, 1.0302e-01, -1.0784e-01, -1.5385e-01, + -2.3930e-01, -7.6253e-02, -1.7073e-01, -4.2903e-02, -5.9578e-01, + -2.3033e-02, 7.3991e-02, -1.5132e-01, -2.2875e-01, -5.7464e-01, + 1.2045e-02, 1.5259e-01, 2.2698e-02, -1.2321e-02, 2.8336e-02, + -7.0975e-02, -4.7671e-01, -3.2868e-01, -1.4916e-02, 1.2285e-01, + 7.6774e-02, 1.4494e-01, -4.1345e-01, -4.6182e-01, -4.8463e-04, + -3.6147e-01, -3.8726e-02, -9.4645e-02, -9.8972e-02, -3.3224e-01, + 6.6328e-02, -3.8574e-01, -6.5025e-02, -1.9355e-02, -4.1928e-02, + -2.7942e-02, -1.3675e-01, -9.0207e-02, 1.6240e-01, -2.1510e-01, + -2.5985e-01, -3.0287e-01, -1.0513e-01, -3.8030e-01, -3.6728e-01, + -1.1745e-01, -5.0993e-02, -3.7855e-01, -9.0005e-02, 1.0385e-01, + -2.4488e-01, -1.3680e-01, 8.1523e-03, 4.2134e-03, -4.7236e-02, + -1.3577e-01, -3.4590e-01, 1.7077e-01, -9.6146e-02, -4.1343e-01, + -4.8108e-01, -3.5540e-02, -7.4613e-02, 1.1850e-02, -1.4102e-01, + 5.6265e-02, 8.6475e-02, -3.0433e-01, -1.0524e-01, -3.2874e-01, + -2.1750e-01, -5.2179e-02, -1.4227e-01, -2.2266e-01, -7.4450e-02, + -1.3168e-01, 6.0100e-02, -3.6141e-01, -2.0720e-01, -5.6104e-01, + -2.5403e-02, -6.4887e-02, 8.2603e-03, -1.4174e-01, -2.6638e-01, + -4.4565e-02, -9.0775e-02, 2.8629e-04, -8.0418e-02, -3.3829e-02, + 2.2968e-02, -7.5467e-02, -7.3682e-02, -1.1375e-02, 5.0907e-02, + 2.9746e-02, -5.6768e-01, -5.5499e-02, 2.7643e-02, -1.0926e-01, + -8.0734e-02, -2.0403e-02, 6.3541e-02, -3.6691e-02, -3.6280e-01, + -1.3140e-01, -9.2061e-02, -3.5287e-01, -4.2136e-02, -5.3335e-01, + -4.7398e-01, -5.2177e-02, -1.9104e-02, 4.3454e-02, -1.7929e-01, + -7.1999e-02, -7.9322e-02, 4.1897e-02, -2.1090e-01, 1.0160e-01, + -5.6137e-02, -3.5983e-01, -2.9724e-01, 8.7154e-03, -2.2268e-02, + -3.2232e-01, -1.1682e-01, -9.0703e-02, -6.0229e-02, -4.9165e-02, + -4.4359e-03, -1.1074e-01, -4.2587e-03, 
-8.4900e-02, -6.3527e-02, + -5.1124e-01, -7.7233e-02, -3.4528e-01, 7.0478e-02, -8.7243e-02, + -1.9804e-01], device='cuda:0')), ('module.bn9.running_var', tensor([0.1899, 0.1113, 0.0920, 0.1232, 0.3023, 0.1324, 0.3175, 0.1891, 0.1293, + 0.1455, 0.1242, 0.1585, 0.2575, 0.0841, 0.0880, 0.1728, 0.3042, 0.1273, + 0.1115, 0.1099, 0.1763, 0.1694, 0.1699, 0.2841, 0.0747, 0.1207, 0.1776, + 0.1846, 0.0553, 0.2883, 0.2296, 0.1603, 0.1294, 0.1453, 0.1052, 0.1480, + 0.0919, 0.2332, 0.1185, 0.2944, 0.1343, 0.1549, 0.1048, 0.2091, 0.1598, + 0.1182, 0.2430, 0.2449, 0.2083, 0.0908, 0.0655, 0.1696, 0.2503, 0.1136, + 0.0905, 0.1073, 0.1995, 0.1595, 0.1302, 0.2162, 0.0717, 0.0868, 0.0797, + 0.1406, 0.1039, 0.1326, 0.1950, 0.1846, 0.0797, 0.1382, 0.1369, 0.1290, + 0.1097, 0.0987, 0.1989, 0.1687, 0.1737, 0.1114, 0.1545, 0.2358, 0.1121, + 0.1554, 0.2403, 0.2232, 0.1902, 0.2537, 0.1350, 0.1975, 0.1062, 0.2059, + 0.1318, 0.1179, 0.1242, 0.1484, 0.1743, 0.2062, 0.1420, 0.2236, 0.1886, + 0.1294, 0.2888, 0.1803, 0.0544, 0.1089, 0.2478, 0.1400, 0.0913, 0.1284, + 0.0938, 0.1643, 0.1373, 0.1081, 0.1212, 0.1176, 0.1589, 0.0802, 0.1343, + 0.1485, 0.1413, 0.1245, 0.0903, 0.2746, 0.1165, 0.1190, 0.1228, 0.2268, + 0.0778, 0.2722, 0.2508, 0.1219, 0.1212, 0.1587, 0.2205, 0.2382, 0.1755, + 0.1493, 0.1811, 0.1031, 0.1094, 0.2810, 0.1863, 0.0612, 0.2947, 0.1417, + 0.2432, 0.2634, 0.0868, 0.1230, 0.1974, 0.2540, 0.2218, 0.2026, 0.1112, + 0.0848, 0.0982, 0.1768, 0.1584, 0.1095, 0.0750, 0.2081, 0.1977, 0.1275, + 0.0961, 0.0773, 0.1142, 0.1429, 0.1599, 0.2117, 0.1111, 0.1273, 0.1755, + 0.0804, 0.0966, 0.1560, 0.1835, 0.1723, 0.1644, 0.2175, 0.1650, 0.2210, + 0.1236, 0.1582, 0.2410, 0.1201, 0.0893, 0.1952, 0.2001, 0.3300, 0.1487, + 0.1550, 0.1974, 0.2892, 0.0904, 0.1494, 0.1432, 0.1586, 0.2218, 0.2921, + 0.1035, 0.0743, 0.2720, 0.0967, 0.0682, 0.1022, 0.2986, 0.1707, 0.0708, + 0.1512, 0.1431, 0.1295, 0.0961, 0.1390, 0.1940, 0.1797, 0.1289, 0.1079, + 0.1925, 0.0919, 0.2591, 0.1006, 0.2292, 0.2350, 0.2042, 
0.2713, 0.1428, + 0.1183, 0.1672, 0.2736, 0.1628, 0.0901, 0.0923, 0.1417, 0.2522, 0.1513, + 0.1823, 0.1416, 0.0822, 0.1877, 0.2366, 0.1861, 0.2000, 0.1462, 0.1716, + 0.1434, 0.1858, 0.1693, 0.1788, 0.3953, 0.1661, 0.1958, 0.1266, 0.0838, + 0.1994, 0.3833, 0.1797, 0.0676], device='cuda:0')), ('module.bn9.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.bn10.weight', tensor([0.8015, 0.2252, 0.9373, 0.9009, 0.8371, 0.9301, 0.4780, 0.6508, 0.5210, + 0.9029, 0.9498, 0.7184, 0.3249, 0.3362, 0.9156, 0.9676, 0.9653, 0.8013, + 0.9641, 0.9074, 0.7074, 0.7666, 0.7584, 0.8730, 0.2298, 0.9501, 0.2364, + 0.0741, 0.9448, 0.8492, 0.9594, 0.8277, 0.4666, 0.9187, 0.8561, 0.9430, + 0.8778, 0.7370, 0.7970, 0.9080, 0.6319, 0.9229, 0.9372, 0.1750, 0.4493, + 0.8366, 0.6683, 0.2292, 0.3172, 0.9359, 0.9505, 0.8996, 0.3965, 0.8629, + 0.8986, 0.9294, 0.8499, 0.7422, 0.3883, 0.9576, 0.7270, 0.8534, 0.9350, + 0.8158, 0.9616, 0.9568, 0.9277, 0.8707, 0.9401, 0.2764, 0.5540, 0.7615, + 0.9572, 0.5237, 0.9488, 0.6275, 0.8726, 0.6882, 0.9384, 0.9068, 0.7133, + 0.9206, 0.8501, 0.8965, 0.8984, 0.9614, 0.4725, 0.9484, 0.9400, 0.9529, + 0.3343, 0.9027, 0.6408, 0.7421, 0.8778, 0.6821, 0.6470, 0.6392, 0.6938, + 0.8965, 0.9144, 0.7558, 0.8165, 0.7853, 0.9597, 0.9158, 0.5280, 0.9324, + 0.7621, 0.5329, 0.2605, 0.9593, 0.5097, 0.8010, 0.9206, 0.7345, 0.7195, + 0.5423, 0.8894, 0.5222, 0.8384, 0.9545, 0.5171, 0.9189, 0.6087, 0.4509, + 0.3672, 0.8354], device='cuda:0')), ('module.bn10.bias', tensor([-8.6660e-02, -2.7512e-02, 1.0634e-02, -1.4279e-02, -3.7932e-02, + 2.2786e-03, -4.8029e-02, 1.4300e-05, -3.4495e-02, -8.5370e-02, + -8.6371e-02, -5.0839e-02, -2.0320e-02, -1.5466e-02, -2.9051e-02, + -7.9343e-03, -7.3030e-02, -3.1973e-02, -8.2262e-02, 2.3977e-02, + -3.0186e-03, -2.9487e-02, -4.9020e-02, -4.5043e-02, -2.3022e-02, + -6.6896e-03, -2.0929e-02, -3.7868e-03, -1.0604e-03, -5.8307e-03, + -4.7679e-02, -3.4500e-02, -2.1458e-02, 1.6146e-02, -1.8226e-02, + -4.9448e-02, -4.5903e-03, -2.3152e-03, 
1.7686e-02, 4.2685e-02, + 1.3087e-02, -4.6810e-02, 3.6077e-03, -1.7218e-02, -3.8915e-02, + 2.8525e-02, -9.2412e-03, -1.6305e-02, -1.2359e-02, -6.7783e-03, + -4.5505e-02, 2.9263e-02, -2.1087e-02, -1.0610e-01, -6.5504e-03, + -3.3754e-02, 2.3730e-02, -3.4061e-02, -2.7593e-02, -6.7381e-02, + -2.4079e-02, 2.3773e-03, -8.3001e-02, 1.1731e-03, -7.3067e-02, + -6.7051e-02, -1.0855e-02, -6.8399e-02, -6.9189e-02, -2.0795e-02, + -9.3339e-03, -1.1927e-01, -3.5769e-02, -9.6744e-03, 6.3519e-03, + 4.0807e-03, -4.8269e-02, -5.4872e-03, -2.5825e-02, -4.7765e-03, + -5.8219e-02, -2.9842e-02, -4.2608e-02, -6.9282e-03, -3.0244e-02, + -2.5694e-02, -9.1605e-03, 1.8612e-03, 8.4631e-03, -3.3621e-02, + -1.8484e-02, 1.4802e-02, 8.9858e-03, 1.5266e-02, -7.1313e-03, + -1.0148e-02, -3.6386e-02, -8.8002e-03, -8.6535e-03, -2.0897e-02, + -3.2791e-03, -9.5493e-03, -2.5698e-02, 1.3044e-02, -5.3298e-03, + 1.5053e-03, -2.3083e-02, -6.9719e-02, -3.6337e-02, 4.9225e-03, + -2.0425e-02, -2.2978e-02, -4.3961e-03, -2.9562e-02, -5.4046e-02, + -1.5622e-02, -1.3388e-02, -5.1301e-02, -4.6756e-02, -2.3418e-02, + -1.5255e-02, -9.0177e-03, -6.9495e-02, 1.0543e-02, 6.3831e-03, + -6.0427e-02, -2.2419e-02, 1.8376e-02], device='cuda:0')), ('module.bn10.running_mean', tensor([ 0.2301, -0.0858, -0.2921, -0.0457, 0.0209, -0.4680, 0.3066, -0.3436, + 0.2388, 0.5737, -0.3007, 0.2554, -0.0358, -0.0747, -0.2055, -0.2635, + 0.0317, 0.3802, 0.2516, -0.3124, -0.2975, 0.2440, -0.1890, 0.3440, + 0.0074, -0.2376, 0.0031, -0.0044, -0.1872, -0.1042, -0.1907, -0.1874, + 0.1850, -0.2955, -0.1855, -0.0908, -0.3136, -0.3080, -0.2629, 0.0856, + -0.2758, 0.2606, -0.1625, -0.0790, 0.2225, -0.4050, -0.1213, -0.0133, + -0.0087, -0.3481, -0.2254, -0.1735, -0.2240, 0.2645, 0.2940, 0.0359, + -0.1327, 0.3415, -0.0921, -0.3031, 0.2253, -0.1436, -0.2576, -0.3225, + 0.2778, -0.0991, -0.1911, 0.0288, 0.0994, -0.0483, 0.0646, 0.3204, + -0.2163, -0.0381, -0.1356, -0.2670, -0.1516, -0.0927, -0.1902, -0.4946, + 0.2121, -0.2799, -0.0722, -0.2272, -0.2151, 
-0.3749, -0.3035, -0.1858, + 0.3971, -0.4306, -0.0274, -0.4580, -0.1872, -0.3702, 0.2135, -0.2760, + 0.4323, -0.2614, -0.1481, -0.2262, 0.3974, 0.4103, 0.0756, -0.2605, + -0.1395, -0.2482, -0.1717, -0.1674, -0.1779, -0.2963, -0.0107, 0.2392, + -0.3089, 0.3841, 0.5516, -0.2215, 0.1260, 0.1341, -0.0015, -0.1106, + -0.0487, -0.4744, 0.1664, 0.5308, -0.3666, 0.2434, 0.0023, 0.0905], + device='cuda:0')), ('module.bn10.running_var', tensor([3.0393e-01, 1.4213e-02, 8.2882e-01, 7.0779e-01, 4.2531e-01, 5.1979e-01, + 1.0894e-01, 1.8011e-01, 8.8379e-02, 4.0628e-01, 9.4501e-01, 1.6457e-01, + 1.0463e-02, 1.5628e-02, 6.5976e-01, 9.9919e-01, 8.3391e-01, 1.8527e-01, + 8.1469e-01, 3.9413e-01, 1.8188e-01, 1.4616e-01, 4.8189e-01, 2.4004e-01, + 1.6527e-02, 7.9543e-01, 1.1487e-02, 2.4170e-04, 7.4977e-01, 3.9868e-01, + 6.3322e-01, 3.3846e-01, 1.6624e-01, 6.3598e-01, 3.1920e-01, 9.8898e-01, + 3.1216e-01, 1.1661e-01, 2.1431e-01, 7.1452e-01, 1.8172e-01, 4.7235e-01, + 7.8197e-01, 4.9825e-03, 1.0185e-01, 3.5355e-01, 8.0131e-02, 2.6818e-03, + 3.7468e-03, 6.6704e-01, 8.1577e-01, 3.9523e-01, 9.4484e-02, 3.6161e-01, + 5.8243e-01, 4.3409e-01, 3.5554e-01, 1.8034e-01, 4.2000e-02, 7.5259e-01, + 1.8168e-01, 5.6277e-01, 7.6217e-01, 2.1920e-01, 8.2623e-01, 1.0546e+00, + 6.3078e-01, 5.5690e-01, 7.7662e-01, 2.0980e-02, 2.2966e-01, 3.0030e-01, + 5.6320e-01, 1.0852e-01, 6.3178e-01, 1.8067e-01, 5.5005e-01, 1.1868e-01, + 9.2327e-01, 3.9416e-01, 2.1718e-01, 5.3000e-01, 2.9804e-01, 4.2417e-01, + 4.4208e-01, 9.4922e-01, 1.0552e-01, 7.8810e-01, 5.8501e-01, 8.0887e-01, + 1.3617e-02, 4.4939e-01, 1.4311e-01, 2.8314e-01, 5.9850e-01, 1.4934e-01, + 2.3435e-01, 1.4563e-01, 1.4877e-01, 5.1729e-01, 4.8582e-01, 3.1407e-01, + 2.7550e-01, 1.9792e-01, 7.4529e-01, 5.9428e-01, 8.1008e-02, 9.4468e-01, + 1.7417e-01, 8.7544e-02, 1.0872e-02, 6.6777e-01, 9.9943e-02, 2.5464e-01, + 3.9253e-01, 1.8366e-01, 1.7111e-01, 1.4506e-01, 4.4178e-01, 5.2595e-02, + 5.3852e-01, 7.2784e-01, 1.1154e-01, 4.7631e-01, 1.8129e-01, 1.0700e-01, + 
1.2552e-02, 3.1532e-01], device='cuda:0')), ('module.bn10.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.conv1.0.weight', tensor([[[[-3.0334e-02]], + + [[-1.7828e-01]], + + [[ 2.3694e-01]], + + [[ 3.1492e-01]], + + [[ 3.6839e-01]], + + [[-5.2105e-02]]], + + + [[[ 2.0789e-01]], + + [[-3.1774e-02]], + + [[-5.1761e-02]], + + [[-1.7135e-01]], + + [[-9.9765e-02]], + + [[ 4.5945e-02]]], + + + [[[-1.6049e-01]], + + [[ 1.2615e-01]], + + [[ 2.9325e-01]], + + [[-1.9044e-01]], + + [[ 1.5765e-02]], + + [[ 3.6069e-01]]], + + + [[[-3.1390e-01]], + + [[ 8.8434e-02]], + + [[ 9.7708e-03]], + + [[-1.3168e-01]], + + [[-1.8078e-01]], + + [[ 3.4170e-01]]], + + + [[[ 3.9737e-01]], + + [[ 9.9929e-02]], + + [[-2.2831e-01]], + + [[ 7.8575e-02]], + + [[ 1.1516e-01]], + + [[-4.1168e-02]]], + + + [[[ 1.1644e-01]], + + [[ 1.3968e-01]], + + [[-4.2152e-01]], + + [[-8.8895e-02]], + + [[-1.2830e-01]], + + [[ 1.3976e-01]]], + + + [[[ 1.1205e-01]], + + [[ 8.5925e-02]], + + [[ 9.9078e-03]], + + [[-3.8285e-01]], + + [[-7.4555e-02]], + + [[-2.5344e-01]]], + + + [[[ 2.2091e-01]], + + [[-2.3072e-01]], + + [[ 7.7293e-02]], + + [[-1.1268e-01]], + + [[ 1.9157e-01]], + + [[ 2.2778e-01]]], + + + [[[ 9.1268e-02]], + + [[ 3.8338e-01]], + + [[ 2.7015e-01]], + + [[ 4.0708e-01]], + + [[ 3.0451e-01]], + + [[ 1.4679e-01]]], + + + [[[ 1.1113e-01]], + + [[-2.1473e-01]], + + [[-2.3165e-01]], + + [[-2.9479e-01]], + + [[ 1.6175e-01]], + + [[ 2.0146e-01]]], + + + [[[-1.8549e-01]], + + [[-3.7274e-01]], + + [[-1.1904e-01]], + + [[-2.2333e-01]], + + [[-2.9492e-01]], + + [[-1.1221e-01]]], + + + [[[-2.4567e-01]], + + [[ 2.2337e-01]], + + [[-1.8182e-01]], + + [[ 1.6006e-01]], + + [[-3.3278e-01]], + + [[ 8.2628e-02]]], + + + [[[-7.1921e-02]], + + [[ 4.7970e-01]], + + [[ 2.2855e-01]], + + [[-6.7084e-02]], + + [[-2.4283e-01]], + + [[-3.4564e-01]]], + + + [[[ 3.2821e-01]], + + [[ 4.3972e-02]], + + [[ 4.7644e-01]], + + [[-9.6587e-02]], + + [[ 2.6466e-01]], + + [[-3.2717e-01]]], + + + [[[-2.2952e-01]], + + 
[[-1.7947e-01]], + + [[ 5.3260e-02]], + + [[ 1.6502e-04]], + + [[ 1.6883e-01]], + + [[ 3.9021e-01]]], + + + [[[-7.8669e-02]], + + [[-1.8017e-01]], + + [[-2.8020e-01]], + + [[-5.0883e-02]], + + [[ 1.9554e-01]], + + [[-3.6963e-01]]], + + + [[[ 3.3137e-01]], + + [[-1.3725e-01]], + + [[ 2.3675e-01]], + + [[-2.7564e-01]], + + [[-1.6674e-01]], + + [[-5.4410e-02]]], + + + [[[ 2.6481e-01]], + + [[ 2.6339e-01]], + + [[ 1.3665e-01]], + + [[ 3.0428e-01]], + + [[ 3.5591e-02]], + + [[-1.8452e-01]]], + + + [[[ 3.2781e-01]], + + [[ 2.7568e-01]], + + [[-2.0978e-01]], + + [[-3.0407e-01]], + + [[-2.7580e-01]], + + [[ 4.0686e-01]]], + + + [[[ 1.2864e-02]], + + [[-9.5531e-02]], + + [[-3.5817e-01]], + + [[ 2.9295e-01]], + + [[-2.1446e-01]], + + [[-3.6447e-01]]], + + + [[[ 2.8606e-01]], + + [[ 2.3689e-01]], + + [[-2.7266e-01]], + + [[ 1.1546e-01]], + + [[ 3.4280e-01]], + + [[ 3.7387e-02]]], + + + [[[-1.2817e-01]], + + [[ 2.4413e-01]], + + [[ 9.1682e-03]], + + [[ 1.8532e-01]], + + [[ 3.1247e-01]], + + [[-2.8923e-01]]], + + + [[[ 1.1488e-01]], + + [[-1.5526e-01]], + + [[ 4.5381e-01]], + + [[ 5.7596e-02]], + + [[-1.6579e-01]], + + [[ 1.2082e-02]]], + + + [[[-1.3070e-01]], + + [[-1.1455e-01]], + + [[ 1.5317e-01]], + + [[-3.7932e-01]], + + [[-2.6226e-01]], + + [[-1.1890e-01]]], + + + [[[-1.2247e-01]], + + [[-2.5707e-01]], + + [[ 3.5119e-01]], + + [[ 1.1892e-01]], + + [[ 3.6864e-01]], + + [[-7.5822e-02]]], + + + [[[-1.3154e-02]], + + [[ 1.0138e-03]], + + [[-2.9948e-01]], + + [[ 4.8112e-02]], + + [[ 1.7462e-01]], + + [[-2.2746e-01]]], + + + [[[ 3.0904e-01]], + + [[-3.1280e-03]], + + [[-1.6808e-01]], + + [[ 2.4512e-01]], + + [[-3.1518e-02]], + + [[ 3.6987e-01]]], + + + [[[ 3.9461e-02]], + + [[ 2.2589e-01]], + + [[-2.4166e-01]], + + [[ 2.4574e-01]], + + [[ 3.6926e-01]], + + [[ 2.3307e-01]]], + + + [[[-1.9767e-01]], + + [[ 6.2172e-02]], + + [[-4.3647e-01]], + + [[ 3.2160e-01]], + + [[ 8.7762e-02]], + + [[-1.9560e-01]]], + + + [[[-2.2127e-01]], + + [[-2.0149e-01]], + + [[ 2.4634e-01]], + + [[ 
3.4517e-01]], + + [[-3.9282e-01]], + + [[ 2.6289e-01]]], + + + [[[ 1.6667e-01]], + + [[-1.0436e-01]], + + [[ 2.1757e-01]], + + [[ 2.6280e-02]], + + [[-3.6860e-01]], + + [[ 1.7080e-01]]], + + + [[[ 2.1768e-01]], + + [[-1.5501e-01]], + + [[-2.0559e-01]], + + [[ 9.2478e-02]], + + [[ 1.7585e-01]], + + [[ 4.2018e-01]]], + + + [[[-3.0335e-01]], + + [[-2.3561e-01]], + + [[ 3.0734e-01]], + + [[-3.1052e-02]], + + [[ 2.9363e-01]], + + [[ 3.2434e-01]]], + + + [[[ 4.2264e-02]], + + [[ 2.2018e-01]], + + [[ 3.3634e-01]], + + [[-1.1733e-01]], + + [[-2.5139e-03]], + + [[ 3.4548e-01]]], + + + [[[-2.3592e-01]], + + [[-2.9571e-01]], + + [[ 2.2118e-01]], + + [[-6.8047e-02]], + + [[-7.3298e-02]], + + [[-2.4343e-01]]], + + + [[[-6.5636e-02]], + + [[ 4.6632e-01]], + + [[ 2.1816e-01]], + + [[ 1.7438e-01]], + + [[-3.1997e-01]], + + [[-3.0728e-01]]], + + + [[[ 2.2113e-01]], + + [[-4.0972e-01]], + + [[ 2.3259e-01]], + + [[ 2.4570e-01]], + + [[ 2.7233e-01]], + + [[ 1.7798e-01]]], + + + [[[ 1.3930e-01]], + + [[-5.6902e-02]], + + [[ 2.4334e-02]], + + [[-3.3007e-01]], + + [[-3.5026e-01]], + + [[ 1.8365e-01]]], + + + [[[-2.5644e-02]], + + [[-2.5099e-01]], + + [[-4.6090e-01]], + + [[ 1.0623e-01]], + + [[-2.1353e-01]], + + [[-2.8279e-02]]], + + + [[[ 2.8806e-01]], + + [[ 6.8980e-02]], + + [[-3.0154e-01]], + + [[ 1.3943e-01]], + + [[ 1.3693e-02]], + + [[-2.9514e-01]]], + + + [[[-2.0668e-01]], + + [[-3.5593e-03]], + + [[-1.9016e-01]], + + [[ 3.1383e-01]], + + [[-2.5593e-01]], + + [[-3.0315e-02]]], + + + [[[-1.0004e-01]], + + [[ 3.3673e-02]], + + [[ 4.8325e-02]], + + [[-3.7493e-01]], + + [[ 3.6143e-01]], + + [[-1.0765e-01]]], + + + [[[ 9.1495e-02]], + + [[-1.3414e-01]], + + [[ 3.6609e-01]], + + [[-4.6785e-02]], + + [[-1.0689e-02]], + + [[ 9.5395e-02]]], + + + [[[-3.1247e-01]], + + [[ 3.3789e-01]], + + [[ 1.3623e-01]], + + [[-8.0335e-02]], + + [[ 3.3184e-01]], + + [[-2.6190e-01]]], + + + [[[ 7.2412e-02]], + + [[-2.1318e-01]], + + [[-6.0852e-02]], + + [[-3.3148e-01]], + + [[-5.5988e-02]], + + 
[[-3.4279e-01]]], + + + [[[-3.0617e-01]], + + [[ 4.4216e-01]], + + [[-5.9481e-02]], + + [[ 5.6960e-03]], + + [[ 2.0375e-01]], + + [[ 2.6746e-01]]], + + + [[[ 5.8494e-02]], + + [[ 2.8531e-01]], + + [[ 1.4103e-01]], + + [[-1.9302e-01]], + + [[-2.6912e-01]], + + [[ 4.1443e-01]]], + + + [[[ 3.4545e-01]], + + [[ 2.7358e-01]], + + [[-3.1154e-01]], + + [[-3.6917e-01]], + + [[ 2.0780e-01]], + + [[-1.6365e-01]]], + + + [[[-2.9648e-01]], + + [[ 2.0200e-01]], + + [[-3.4865e-01]], + + [[-6.9275e-02]], + + [[ 2.4788e-02]], + + [[ 2.2516e-01]]], + + + [[[ 4.0150e-01]], + + [[-1.2902e-02]], + + [[-4.3386e-02]], + + [[-4.0886e-03]], + + [[ 9.6724e-02]], + + [[ 2.3699e-01]]], + + + [[[-3.2652e-01]], + + [[ 6.0269e-02]], + + [[ 1.5222e-01]], + + [[ 6.8210e-02]], + + [[-2.6621e-01]], + + [[ 2.9273e-01]]], + + + [[[-3.6190e-01]], + + [[-3.8317e-01]], + + [[-6.7452e-02]], + + [[ 1.7140e-01]], + + [[ 1.9434e-01]], + + [[-6.3713e-02]]], + + + [[[-2.0947e-01]], + + [[-2.6268e-01]], + + [[-3.2838e-01]], + + [[ 2.3146e-01]], + + [[ 2.9237e-01]], + + [[ 3.3499e-01]]], + + + [[[ 3.4520e-01]], + + [[ 2.9328e-01]], + + [[ 1.5479e-01]], + + [[ 2.1897e-01]], + + [[ 3.6710e-01]], + + [[ 1.4353e-02]]], + + + [[[-2.4809e-01]], + + [[ 3.4077e-01]], + + [[ 1.8677e-02]], + + [[ 2.1476e-01]], + + [[ 3.2505e-01]], + + [[ 1.9732e-01]]], + + + [[[-8.0248e-02]], + + [[-1.7515e-01]], + + [[ 1.4150e-03]], + + [[ 1.1113e-01]], + + [[ 3.4240e-01]], + + [[-2.2919e-01]]], + + + [[[-3.0809e-01]], + + [[ 3.9892e-01]], + + [[-2.0543e-01]], + + [[-2.7393e-01]], + + [[ 7.5262e-02]], + + [[ 2.9518e-01]]], + + + [[[ 2.7514e-01]], + + [[-5.2417e-02]], + + [[-3.7181e-01]], + + [[-1.1378e-02]], + + [[ 2.9306e-01]], + + [[ 1.5553e-01]]], + + + [[[-2.8404e-01]], + + [[-1.6838e-02]], + + [[-4.0706e-01]], + + [[ 1.0222e-01]], + + [[ 7.6919e-03]], + + [[-2.0316e-01]]], + + + [[[-3.2939e-01]], + + [[ 8.5744e-02]], + + [[ 2.8856e-01]], + + [[-1.1561e-01]], + + [[ 1.3532e-02]], + + [[-2.2282e-01]]], + + + [[[-2.9853e-01]], + + 
[[-3.4407e-01]], + + [[ 2.7208e-01]], + + [[-2.7022e-02]], + + [[ 3.3820e-01]], + + [[-5.6813e-02]]], + + + [[[-1.8058e-01]], + + [[-2.0420e-01]], + + [[-3.9273e-02]], + + [[ 1.7749e-01]], + + [[-3.0893e-01]], + + [[ 3.1474e-01]]], + + + [[[-3.4611e-01]], + + [[ 4.0315e-01]], + + [[ 2.4380e-01]], + + [[ 1.4928e-01]], + + [[ 8.3416e-02]], + + [[-2.8908e-01]]], + + + [[[-7.7601e-02]], + + [[ 1.9581e-01]], + + [[ 3.8646e-01]], + + [[-1.5215e-01]], + + [[ 2.3812e-01]], + + [[ 3.1755e-01]]]], device='cuda:0')), ('module.conv1.1.weight', tensor([0.9574, 1.0045, 0.9917, 0.9858, 0.9488, 1.0210, 0.9428, 0.9855, 0.9808, + 1.0112, 0.9839, 0.9913, 0.9923, 1.0267, 0.9602, 0.9188, 0.9699, 0.9656, + 0.9738, 0.9653, 0.9604, 0.9641, 0.9548, 0.9873, 0.9911, 0.9564, 0.9748, + 0.9592, 0.9849, 0.9579, 0.9809, 0.9939, 0.9642, 0.9904, 1.0288, 1.0064, + 0.9692, 0.9552, 0.9671, 0.9583, 0.9429, 0.9853, 1.0013, 0.9701, 0.9813, + 0.9638, 0.9797, 1.0066, 1.0186, 0.9919, 0.9907, 0.9759, 0.9834, 0.9508, + 0.9867, 0.9827, 0.9874, 0.9730, 0.9319, 1.0060, 0.9537, 0.9960, 0.9770, + 0.9807], device='cuda:0')), ('module.conv1.1.bias', tensor([ 0.0305, 0.0291, -0.0768, 0.0243, -0.0372, -0.0718, -0.0121, -0.0137, + 0.0166, -0.0936, 0.0054, 0.0373, 0.0364, -0.0265, -0.0594, 0.0420, + 0.0227, 0.0182, 0.0428, -0.0645, 0.0070, -0.0112, -0.0146, -0.1356, + -0.1362, -0.0237, 0.0919, -0.0086, 0.0144, 0.0529, -0.0482, -0.0354, + -0.0457, -0.0564, -0.0926, 0.0037, 0.0063, -0.0396, 0.1050, -0.0602, + -0.0052, -0.0825, 0.0018, -0.0235, -0.0165, 0.0554, -0.0396, -0.1206, + -0.1034, -0.0087, -0.0356, 0.0425, -0.0442, 0.0613, -0.0266, -0.0372, + -0.0379, 0.0205, 0.0219, -0.0351, 0.0276, -0.0068, 0.0126, 0.0259], + device='cuda:0')), ('module.conv1.1.running_mean', tensor([ 0.4261, -0.2351, -0.0712, 0.0420, 0.0837, -0.0322, -0.7881, -0.0599, + 0.7213, -0.3721, -0.3978, 0.3686, -0.3069, -0.4192, 0.2384, -0.3626, + -0.4639, 0.3693, -0.1827, 0.2668, 0.1630, 0.0617, 0.1277, -0.6631, + 0.0885, -0.1010, 0.6567, 0.5049, 
0.3823, 0.8031, 0.2140, 0.4090, + 0.1239, 0.0412, -0.2650, 0.1269, 0.4824, -0.3648, 0.1873, 0.0273, + 0.5330, -0.7423, -0.0107, -0.3586, -0.7670, 0.1593, 0.0037, -0.7477, + 0.0346, 0.1387, 0.3498, 0.2089, 0.5619, 0.3137, 0.4367, -0.0240, + -0.2615, 0.0421, 0.0293, -0.3421, -0.1336, 0.5503, 0.0372, -0.0710], + device='cuda:0')), ('module.conv1.1.running_var', tensor([0.2694, 0.0716, 0.1177, 0.0739, 0.0200, 0.0286, 0.3689, 0.0699, 0.4158, + 0.2523, 0.1464, 0.1374, 0.0668, 0.0903, 0.0504, 0.0566, 0.1869, 0.2308, + 0.2683, 0.2841, 0.0777, 0.1263, 0.0258, 0.3566, 0.0876, 0.0275, 0.1828, + 0.1993, 0.2554, 0.4271, 0.0754, 0.0740, 0.0749, 0.0596, 0.0277, 0.1697, + 0.1722, 0.2911, 0.0592, 0.0659, 0.3018, 0.4561, 0.0079, 0.0923, 0.2932, + 0.0394, 0.1422, 0.3889, 0.0259, 0.0194, 0.0697, 0.0820, 0.1818, 0.1561, + 0.1527, 0.0832, 0.2142, 0.0518, 0.0353, 0.0464, 0.0632, 0.1624, 0.0737, + 0.1207], device='cuda:0')), ('module.conv1.1.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.conv2.0.weight', tensor([[[[-0.1125]], + + [[-0.1352]], + + [[-0.0106]], + + ..., + + [[-0.0328]], + + [[ 0.0343]], + + [[ 0.0844]]], + + + [[[ 0.0470]], + + [[-0.0552]], + + [[-0.1296]], + + ..., + + [[ 0.1282]], + + [[-0.0166]], + + [[ 0.0528]]], + + + [[[ 0.0722]], + + [[-0.1388]], + + [[-0.0396]], + + ..., + + [[-0.0567]], + + [[ 0.1044]], + + [[-0.0899]]], + + + ..., + + + [[[ 0.0152]], + + [[ 0.1304]], + + [[ 0.0813]], + + ..., + + [[ 0.0372]], + + [[ 0.0647]], + + [[-0.0233]]], + + + [[[ 0.0671]], + + [[ 0.0174]], + + [[ 0.0995]], + + ..., + + [[ 0.1065]], + + [[ 0.1166]], + + [[ 0.0627]]], + + + [[[ 0.0398]], + + [[-0.0513]], + + [[ 0.0374]], + + ..., + + [[-0.1242]], + + [[ 0.0975]], + + [[-0.0388]]]], device='cuda:0')), ('module.conv2.1.weight', tensor([0.9684, 0.9667, 0.9564, 0.9479, 0.9961, 0.9625, 0.9757, 0.9456, 0.9751, + 0.9444, 0.9595, 0.9667, 0.9565, 0.9626, 0.9481, 0.9734, 0.9405, 1.0031, + 0.9477, 0.9973, 0.9932, 0.9751, 1.0060, 0.9565, 0.9827, 0.9760, 0.9665, + 0.9774, 
0.9421, 0.9769, 0.8953, 0.9879, 0.9662, 0.9784, 0.9913, 0.9729, + 0.9350, 0.9587, 0.9826, 0.9308, 0.9811, 1.0205, 0.9732, 0.9390, 0.9661, + 0.9299, 0.9724, 0.9757, 0.9669, 0.9653, 0.9339, 0.9573, 0.9675, 0.9864, + 0.9635, 0.9528, 0.9712, 0.9790, 0.9489, 0.9759, 0.9842, 0.9918, 0.9847, + 1.0030], device='cuda:0')), ('module.conv2.1.bias', tensor([-0.0045, -0.0266, 0.0105, -0.0438, -0.0005, -0.0032, -0.0288, -0.0133, + -0.0436, -0.0505, -0.0091, -0.0085, -0.0506, -0.0435, 0.0153, -0.0320, + -0.0064, -0.0247, -0.0292, -0.0906, 0.0223, -0.0192, -0.0435, 0.0024, + -0.0325, -0.0406, -0.0094, -0.0309, -0.0023, -0.0134, -0.1212, -0.0017, + 0.0083, -0.1009, -0.0380, -0.0313, -0.0720, -0.0227, -0.0228, -0.0550, + -0.0090, -0.0688, 0.0184, -0.0256, -0.0329, -0.0270, 0.0202, -0.0070, + 0.0145, 0.0047, -0.0547, -0.0024, 0.0222, -0.0281, -0.0149, 0.0228, + 0.0315, -0.0178, -0.0680, -0.0733, -0.0047, -0.0339, -0.0558, -0.0382], + device='cuda:0')), ('module.conv2.1.running_mean', tensor([-0.2502, -0.2023, -0.3157, 0.0034, -0.2458, -0.3484, 0.4152, -0.1173, + -0.3223, -0.1533, 0.1601, 0.0346, -0.2799, 0.0175, -0.2840, 0.3348, + -0.1910, 0.7102, 0.4342, -0.3117, 0.0183, 0.1721, 0.4742, 0.0622, + -0.2099, -0.1441, 0.3306, -0.2447, -0.2162, -0.2859, 0.0899, 0.0469, + -0.0212, -0.1063, 0.2861, -0.3290, 0.0419, -0.0589, -0.3808, 0.1361, + -0.0076, 0.6340, 0.0050, -0.0847, 0.1963, -0.0973, 0.2167, -0.1371, + -0.5370, 0.0038, 0.1249, 0.1643, -0.5423, 0.3050, -0.1512, 0.1519, + -0.2116, 0.2185, -0.0201, 0.0805, -0.4099, 0.4365, 0.4842, -0.3707], + device='cuda:0')), ('module.conv2.1.running_var', tensor([0.1821, 0.1045, 0.0811, 0.1067, 0.0362, 0.2297, 0.2936, 0.1205, 0.1147, + 0.1005, 0.0931, 0.5177, 0.2772, 0.1406, 0.0876, 0.0800, 0.0336, 0.1262, + 0.0960, 0.0215, 0.1063, 0.0900, 0.2601, 0.1508, 0.1184, 0.1061, 0.1276, + 0.0529, 0.1534, 0.1845, 0.2355, 0.0552, 0.0430, 0.0479, 0.0703, 0.0529, + 0.0888, 0.1233, 0.1285, 0.2397, 0.0884, 0.1318, 0.1151, 0.1675, 0.0955, + 0.2266, 0.1416, 
0.1275, 0.0596, 0.0734, 0.2635, 0.0623, 0.1556, 0.6597, + 0.1636, 0.1391, 0.1487, 0.1740, 0.1156, 0.0364, 0.1959, 0.1808, 0.0507, + 0.1009], device='cuda:0')), ('module.conv2.1.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.conv3.0.weight', tensor([[[[-0.0651]], + + [[-0.0157]], + + [[-0.0238]], + + ..., + + [[ 0.0042]], + + [[-0.0460]], + + [[ 0.0946]]], + + + [[[-0.0859]], + + [[-0.0306]], + + [[ 0.1044]], + + ..., + + [[ 0.0341]], + + [[-0.0064]], + + [[ 0.0359]]], + + + [[[ 0.0784]], + + [[-0.0268]], + + [[-0.0191]], + + ..., + + [[-0.0673]], + + [[-0.1045]], + + [[-0.0927]]], + + + ..., + + + [[[ 0.0452]], + + [[-0.0482]], + + [[ 0.0301]], + + ..., + + [[ 0.0892]], + + [[ 0.0754]], + + [[-0.1081]]], + + + [[[-0.0892]], + + [[-0.0643]], + + [[ 0.0865]], + + ..., + + [[-0.0626]], + + [[-0.0515]], + + [[-0.0085]]], + + + [[[-0.0115]], + + [[ 0.0540]], + + [[-0.0021]], + + ..., + + [[-0.0678]], + + [[ 0.0963]], + + [[ 0.0752]]]], device='cuda:0')), ('module.conv3.1.weight', tensor([0.9839, 0.9673, 0.9477, 0.9683, 0.9668, 0.9498, 0.9569, 0.9838, 0.9390, + 0.9897, 0.9579, 0.9353, 0.9943, 0.9474, 0.9777, 0.9801, 0.9576, 0.9584, + 0.9699, 0.9766, 0.9699, 0.9457, 0.9526, 1.0207, 1.0056, 0.9974, 0.9600, + 0.9917, 0.9797, 0.9582, 0.9479, 0.9490, 0.9719, 0.9551, 0.9849, 0.9607, + 0.9838, 0.9388, 0.9446, 0.9625, 0.9870, 0.9498, 0.9769, 0.9684, 0.9197, + 0.9597, 0.9652, 0.9739, 0.9642, 0.9494, 0.9590, 0.9842, 0.9826, 0.9470, + 1.0147, 0.9712, 0.9521, 0.9214, 0.9603, 0.9889, 1.0057, 0.9544, 0.9696, + 0.9579], device='cuda:0')), ('module.conv3.1.bias', tensor([-0.0331, -0.0112, 0.0057, 0.0016, -0.1097, -0.0550, 0.0029, 0.0223, + -0.0571, 0.0300, -0.0035, 0.0169, 0.0289, -0.0392, 0.0104, -0.0550, + 0.0104, 0.0113, -0.0644, 0.0012, -0.0323, -0.0068, -0.0942, 0.0448, + -0.0752, -0.1161, -0.0092, -0.0118, -0.0095, -0.0124, -0.0480, 0.0430, + 0.0315, -0.0293, -0.0129, -0.0043, -0.0681, -0.0548, -0.0436, -0.0252, + -0.0523, 0.0081, -0.0450, -0.0149, -0.0213, 
-0.0878, -0.0241, -0.0552, + 0.0046, -0.0224, -0.0077, -0.0313, -0.0356, -0.0055, -0.0509, -0.0015, + -0.0171, -0.1119, 0.0044, -0.0294, 0.0841, -0.0126, -0.0042, -0.0767], + device='cuda:0')), ('module.conv3.1.running_mean', tensor([-0.1179, -0.2109, -0.1052, 0.1110, 0.0015, 0.0690, -0.1439, -0.2045, + -0.2490, -0.2555, 0.2528, -0.3513, -0.0930, -0.3589, 0.1332, -0.1093, + 0.0311, 0.0076, -0.3407, 0.2026, -0.0316, 0.1119, -0.1663, -0.2405, + -0.1303, 0.0465, 0.0824, -0.2038, 0.2158, 0.2773, -0.0258, -0.3768, + 0.2864, -0.3150, -0.1850, -0.4120, -0.2422, 0.3398, 0.3062, 0.1578, + 0.1047, 0.1097, -0.0118, 0.0271, -0.1421, 0.1104, -0.3231, 0.1490, + 0.3098, 0.0313, -0.0711, -0.2694, -0.0477, -0.2847, -0.2043, 0.0472, + 0.1980, -0.3076, -0.1446, -0.0578, -0.0207, 0.0126, 0.0509, 0.0559], + device='cuda:0')), ('module.conv3.1.running_var', tensor([0.0311, 0.0632, 0.0329, 0.0725, 0.0299, 0.2389, 0.1650, 0.1427, 0.0887, + 0.0531, 0.2355, 0.1415, 0.0939, 0.1160, 0.0877, 0.0485, 0.0715, 0.0975, + 0.0936, 0.0627, 0.0604, 0.0356, 0.0338, 0.0462, 0.0580, 0.0668, 0.0318, + 0.0964, 0.1594, 0.0955, 0.0424, 0.0889, 0.1725, 0.0734, 0.1686, 0.0895, + 0.0735, 0.0800, 0.0571, 0.1956, 0.0849, 0.0937, 0.1129, 0.1316, 0.1148, + 0.1620, 0.1119, 0.1293, 0.1508, 0.0745, 0.0389, 0.0590, 0.1822, 0.0635, + 0.0297, 0.1179, 0.0884, 0.0759, 0.0723, 0.1092, 0.0566, 0.3219, 0.0481, + 0.0495], device='cuda:0')), ('module.conv3.1.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.conv4.0.weight', tensor([[[[ 0.0603]], + + [[ 0.0592]], + + [[-0.0017]], + + ..., + + [[ 0.0525]], + + [[ 0.0709]], + + [[-0.1551]]], + + + [[[ 0.0937]], + + [[-0.0823]], + + [[ 0.0179]], + + ..., + + [[ 0.0962]], + + [[-0.0439]], + + [[ 0.0668]]], + + + [[[ 0.0351]], + + [[-0.0672]], + + [[-0.0933]], + + ..., + + [[-0.0263]], + + [[-0.0901]], + + [[ 0.0942]]], + + + ..., + + + [[[-0.0536]], + + [[ 0.0353]], + + [[-0.1032]], + + ..., + + [[-0.1114]], + + [[-0.0003]], + + [[ 0.1405]]], + + + [[[-0.0598]], + + [[ 
0.0873]], + + [[-0.1155]], + + ..., + + [[-0.0772]], + + [[ 0.0063]], + + [[ 0.0468]]], + + + [[[ 0.0329]], + + [[ 0.0613]], + + [[ 0.0874]], + + ..., + + [[ 0.0760]], + + [[-0.0332]], + + [[ 0.0263]]]], device='cuda:0')), ('module.conv4.1.weight', tensor([0.9719, 0.9288, 0.9404, 0.8981, 0.9558, 0.8647, 0.9938, 0.9839, 0.9504, + 0.8829, 0.9852, 0.9685, 0.9392, 0.9665, 0.9397, 0.9686, 0.9511, 0.9347, + 0.9005, 0.9341, 0.9637, 0.9782, 0.9068, 0.9944, 0.8678, 0.9496, 0.8791, + 0.8913, 0.9705, 0.9599, 0.9173, 0.9419, 0.9714, 0.8662, 0.9614, 0.9636, + 0.8761, 0.9026, 0.9590, 1.0051, 0.9332, 0.9884, 0.9620, 0.9963, 0.9344, + 0.9917, 0.9104, 0.9644, 0.9793, 0.9491, 0.9661, 0.9749, 0.9918, 0.9274, + 0.9731, 0.9629, 0.9864, 0.8924, 0.9374, 0.9265, 1.0030, 0.9332, 0.9231, + 0.9744], device='cuda:0')), ('module.conv4.1.bias', tensor([-0.0290, -0.0381, -0.0183, -0.0843, -0.0786, -0.0576, -0.0321, -0.0728, + -0.0004, -0.0501, -0.0512, -0.0098, -0.0458, -0.0700, -0.0562, -0.0489, + -0.0136, -0.0957, -0.2043, 0.0487, -0.0604, -0.0490, -0.1111, -0.0637, + -0.1042, -0.1490, -0.0399, -0.0834, -0.0637, -0.0709, -0.0532, -0.0383, + -0.0008, -0.0157, -0.0126, 0.0105, -0.1265, 0.0032, 0.0011, -0.0024, + -0.0607, -0.1279, -0.0568, 0.0023, 0.0132, -0.0262, -0.1053, -0.0529, + -0.0576, -0.0367, -0.1104, -0.0479, -0.0123, -0.0814, -0.0047, -0.0162, + -0.0702, -0.0519, -0.0512, -0.0542, 0.0203, -0.0377, -0.0304, -0.0392], + device='cuda:0')), ('module.conv4.1.running_mean', tensor([-2.7154e-01, 2.5188e-01, -2.3083e-01, -1.1752e-04, -1.6071e-01, + -1.0288e-03, -1.6060e-01, -3.1812e-01, -5.0881e-01, -2.1096e-01, + -2.6173e-01, -4.0433e-02, -2.6942e-01, -1.7432e-01, -2.6561e-01, + 1.9310e-01, 2.3981e-01, 1.3317e-02, -4.9114e-01, -2.9459e-01, + 1.8738e-01, -3.9850e-02, -4.4220e-01, 6.4368e-03, 1.0282e-01, + -1.7609e-01, -1.8123e-01, 5.2415e-02, 2.9093e-01, -1.0226e-01, + 2.5449e-01, 7.9838e-02, -1.5610e-01, -3.8982e-01, -1.1930e-01, + -7.5275e-02, -1.0730e-01, 6.2043e-02, 1.9573e-02, 
-4.8201e-01, + -8.2139e-02, -1.5754e-01, -2.3653e-01, -2.1203e-01, -2.7148e-01, + -1.2872e-02, 2.1902e-01, -6.1971e-02, 2.5442e-01, -9.4199e-02, + 8.5456e-02, -3.1384e-01, -5.2701e-02, -1.8283e-02, 9.5014e-05, + -3.0140e-02, -1.3224e-01, -1.0458e-02, 3.2202e-03, -2.1161e-01, + -3.2415e-02, 1.9768e-01, -1.9820e-03, -6.1268e-02], device='cuda:0')), ('module.conv4.1.running_var', tensor([0.0775, 0.1517, 0.1208, 0.1465, 0.1560, 0.1655, 0.1435, 0.0978, 0.0970, + 0.0966, 0.0883, 0.0750, 0.2008, 0.0495, 0.1411, 0.2070, 0.3223, 0.2494, + 0.0689, 0.0628, 0.1408, 0.2248, 0.1027, 0.0535, 0.1308, 0.0686, 0.2316, + 0.2367, 0.1280, 0.0964, 0.1228, 0.1370, 0.0837, 0.0489, 0.1195, 0.0963, + 0.1629, 0.3504, 0.2324, 0.0411, 0.0992, 0.0932, 0.1617, 0.0878, 0.2154, + 0.1070, 0.2407, 0.2999, 0.0624, 0.1566, 0.1196, 0.0817, 0.1615, 0.2499, + 0.1819, 0.1056, 0.0507, 0.2700, 0.0885, 0.1793, 0.1004, 0.0950, 0.2143, + 0.1332], device='cuda:0')), ('module.conv4.1.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.conv5.0.weight', tensor([[[[-0.0333]], + + [[ 0.0173]], + + [[-0.0615]], + + ..., + + [[-0.0270]], + + [[-0.0410]], + + [[ 0.0562]]], + + + [[[-0.0775]], + + [[-0.0334]], + + [[-0.0685]], + + ..., + + [[ 0.0094]], + + [[ 0.0429]], + + [[-0.0700]]], + + + [[[-0.0256]], + + [[-0.0793]], + + [[-0.0084]], + + ..., + + [[-0.0699]], + + [[ 0.0136]], + + [[-0.0940]]], + + + ..., + + + [[[ 0.0972]], + + [[ 0.0150]], + + [[-0.0266]], + + ..., + + [[ 0.1032]], + + [[ 0.0564]], + + [[-0.0423]]], + + + [[[-0.1479]], + + [[ 0.1105]], + + [[ 0.0604]], + + ..., + + [[ 0.0813]], + + [[-0.0563]], + + [[-0.0304]]], + + + [[[ 0.0139]], + + [[-0.0603]], + + [[-0.0718]], + + ..., + + [[ 0.0712]], + + [[ 0.0877]], + + [[-0.0223]]]], device='cuda:0')), ('module.conv5.1.weight', tensor([0.9956, 0.8218, 0.9660, 0.8502, 0.9757, 0.9295, 0.9911, 0.9380, 1.0068, + 0.8971, 0.9877, 0.9681, 0.8669, 0.9044, 0.9771, 0.9356, 0.9960, 0.9869, + 0.9540, 0.8842, 0.8905, 0.9738, 0.8970, 0.9943, 0.9446, 1.0118, 
0.8025, + 0.9723, 0.9274, 0.8401, 0.9620, 0.9956, 0.9676, 0.9358, 1.0078, 0.8527, + 0.9738, 0.9063, 0.8941, 0.9772, 0.7482, 0.9677, 0.8094, 0.7970, 0.9199, + 0.9855, 1.0018, 0.9199, 0.9615, 0.9797, 0.9876, 0.9846, 0.9892, 0.9019, + 0.8533, 0.8955, 0.9516, 0.9545, 0.9719, 0.8642, 1.0412, 0.9918, 1.0091, + 1.0158], device='cuda:0')), ('module.conv5.1.bias', tensor([-1.7646e-02, -4.8253e-02, -2.8858e-02, -4.7392e-02, -7.9532e-02, + -3.0752e-02, -9.9099e-02, -2.5154e-02, -1.1934e-01, -3.5847e-02, + -7.5621e-02, -8.6020e-02, -3.6303e-02, -6.6361e-02, -5.8640e-02, + -1.0676e-01, -1.1631e-01, -1.3472e-01, -9.7941e-02, -9.1114e-02, + -1.9563e-02, -2.5152e-01, -7.3845e-02, -9.8564e-02, -4.4096e-02, + -1.1259e-01, -2.3151e-02, -1.2702e-01, -3.4798e-02, -1.9918e-02, + -6.9000e-02, -1.7100e-01, -1.5731e-01, -6.0053e-05, -1.2194e-01, + -3.8141e-02, -1.7316e-01, -5.8246e-02, -8.9738e-02, -1.2313e-01, + -4.2175e-02, -1.1261e-01, -1.0774e-01, -6.2062e-02, -9.0335e-02, + -1.6811e-01, -1.0884e-01, -6.7475e-02, -1.1029e-02, 1.2906e-02, + -4.5145e-02, -1.1859e-01, -3.8917e-02, -5.6922e-02, -3.2910e-02, + -6.9608e-02, -3.5666e-02, -4.6286e-02, -1.1235e-01, -5.8298e-02, + -1.1326e-01, -1.2617e-01, -6.3157e-02, -8.4112e-03], device='cuda:0')), ('module.conv5.1.running_mean', tensor([-0.1671, 0.1362, 0.1261, -0.0208, 0.1527, 0.2063, -0.2192, 0.0798, + -0.1073, 0.0608, -0.2205, 0.1398, -0.3201, 0.2858, -0.1489, -0.0047, + 0.1180, 0.1311, 0.3355, -0.2368, -0.2183, -0.1878, 0.6287, 0.0602, + -0.0263, 0.0246, -0.0975, -0.3889, -0.4703, 0.1642, -0.2359, -0.4039, + -0.1448, 0.1513, -0.0390, -0.0513, 0.2277, 0.2182, 0.1174, 0.0410, + 0.1761, 0.0427, -0.0545, -0.1477, -0.1921, -0.1408, 0.1446, 0.0492, + -0.3001, -0.0347, 0.1064, 0.1110, 0.0973, -0.2868, -0.0742, 0.0385, + 0.1662, -0.0487, 0.1617, 0.2526, -0.3219, -0.0930, 0.3640, -0.4084], + device='cuda:0')), ('module.conv5.1.running_var', tensor([0.0803, 0.0675, 0.0971, 0.1109, 0.0468, 0.0587, 0.0560, 0.0595, 0.0618, + 0.0459, 0.0902, 0.0550, 
0.0831, 0.0709, 0.1072, 0.0885, 0.0780, 0.0793, + 0.0913, 0.0720, 0.0550, 0.0554, 0.0756, 0.0593, 0.0704, 0.0716, 0.0796, + 0.0522, 0.0605, 0.1511, 0.0628, 0.0814, 0.1140, 0.0964, 0.0593, 0.0558, + 0.0378, 0.0449, 0.0545, 0.0527, 0.0319, 0.0490, 0.0390, 0.0524, 0.0928, + 0.1487, 0.1069, 0.0375, 0.1076, 0.0694, 0.0599, 0.0765, 0.0604, 0.0294, + 0.0531, 0.0723, 0.0891, 0.0805, 0.0483, 0.0379, 0.0725, 0.0848, 0.0763, + 0.0467], device='cuda:0')), ('module.conv5.1.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.conv6.0.weight', tensor([[[-0.0024], + [ 0.0037], + [ 0.0051], + ..., + [ 0.0087], + [ 0.0263], + [-0.0192]], + + [[ 0.0002], + [ 0.0085], + [-0.0037], + ..., + [-0.0246], + [-0.0036], + [ 0.0104]], + + [[ 0.0051], + [-0.0065], + [-0.0285], + ..., + [-0.0123], + [-0.0211], + [-0.0255]], + + ..., + + [[ 0.0006], + [-0.0007], + [ 0.0223], + ..., + [-0.0017], + [-0.0234], + [ 0.0043]], + + [[-0.0038], + [ 0.0091], + [-0.0054], + ..., + [-0.0029], + [ 0.0174], + [ 0.0053]], + + [[-0.0159], + [-0.0069], + [ 0.0122], + ..., + [-0.0026], + [-0.0228], + [ 0.0154]]], device='cuda:0')), ('module.conv6.1.weight', tensor([0.3100, 0.1241, 0.1257, ..., 0.2415, 0.1580, 0.3044], device='cuda:0')), ('module.conv6.1.bias', tensor([-7.5920e-09, -1.2804e-08, 2.9380e-09, ..., 1.4118e-09, + -5.5393e-09, -1.4208e-08], device='cuda:0')), ('module.conv6.1.running_mean', tensor([ 0.1350, -0.0502, -0.2535, ..., -0.0404, -0.0691, 0.1088], + device='cuda:0')), ('module.conv6.1.running_var', tensor([0.0224, 0.0177, 0.0538, ..., 0.0379, 0.0629, 0.0546], device='cuda:0')), ('module.conv6.1.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.conv7.0.weight', tensor([[[ 0.1007], + [-0.0574], + [-0.0414], + ..., + [-0.0379], + [ 0.2423], + [ 0.1333]], + + [[ 0.1576], + [ 0.0932], + [-0.0702], + ..., + [ 0.1092], + [-0.0735], + [ 0.1162]], + + [[-0.0314], + [-0.0033], + [ 0.0704], + ..., + [ 0.0184], + [ 0.0483], + [-0.2117]], + + ..., + + [[ 0.0765], + [-0.2088], + 
[-0.0608], + ..., + [-0.0596], + [-0.2131], + [-0.0600]], + + [[-0.0705], + [-0.0434], + [-0.1741], + ..., + [ 0.1960], + [ 0.1586], + [ 0.2274]], + + [[-0.1057], + [ 0.0211], + [-0.1496], + ..., + [ 0.1945], + [ 0.2066], + [ 0.2240]]], device='cuda:0')), ('module.conv7.1.weight', tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], device='cuda:0')), ('module.conv7.1.bias', tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + device='cuda:0')), ('module.conv7.1.running_mean', tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + device='cuda:0')), ('module.conv7.1.running_var', tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], device='cuda:0')), ('module.conv7.1.num_batches_tracked', tensor(0, device='cuda:0')), ('module.conv8.0.weight', tensor([[[ 0.0140], + [-0.0007], + [-0.0001], + ..., + [ 0.0318], + [-0.0551], + [-0.0595]], + + [[-0.0007], + [-0.0014], + [-0.0057], + ..., + [-0.0184], + [-0.0062], + [ 0.0331]], + + [[-0.0047], + [-0.0121], + [-0.0016], + ..., + [-0.0059], + [-0.0562], + [-0.0742]], + + ..., + + [[ 0.0075], + [-0.0069], + [-0.0004], + ..., + [ 0.0653], + [-0.0303], + [-0.0125]], + + [[ 
0.0237], + [-0.0021], + [-0.0021], + ..., + [ 0.0249], + [ 0.0164], + [ 0.0275]], + + [[-0.0150], + [ 0.0051], + [ 0.0061], + ..., + [-0.0292], + [ 0.0239], + [ 0.0047]]], device='cuda:0')), ('module.conv8.1.weight', tensor([0.5975, 0.6336, 0.6173, 0.6104, 0.6317, 0.6728, 0.5002, 0.6813, 0.4865, + 0.5985, 0.6397, 0.6842, 0.5258, 0.5911, 0.5789, 0.5546, 0.5426, 0.6505, + 0.6261, 0.5844, 0.7298, 0.5456, 0.5810, 0.6098, 0.5354, 0.5674, 0.5305, + 0.5879, 0.6743, 0.6612, 0.5703, 0.6846, 0.5236, 0.4663, 0.5830, 0.6295, + 0.6389, 0.6141, 0.6196, 0.5281, 0.5320, 0.5720, 0.5801, 0.5960, 0.4653, + 0.6481, 0.6471, 0.5226, 0.5663, 0.5829, 0.6810, 0.5656, 0.5270, 0.5450, + 0.6240, 0.4726, 0.6167, 0.5670, 0.5743, 0.6302, 0.6045, 0.5235, 0.6135, + 0.6238, 0.6437, 0.6064, 0.6152, 0.6007, 0.5392, 0.5754, 0.5522, 0.5770, + 0.5522, 0.5666, 0.6488, 0.6184, 0.6450, 0.6074, 0.6663, 0.6806, 0.5845, + 0.5674, 0.4125, 0.6113, 0.5152, 0.5826, 0.5761, 0.5295, 0.6270, 0.6220, + 0.6417, 0.5565, 0.5432, 0.5742, 0.6486, 0.5560, 0.5912, 0.4814, 0.6064, + 0.5354, 0.5963, 0.5535, 0.6901, 0.4843, 0.6334, 0.5833, 0.6275, 0.6893, + 0.5902, 0.4670, 0.5237, 0.5259, 0.6049, 0.5312, 0.5465, 0.5641, 0.5297, + 0.6019, 0.6115, 0.5208, 0.5313, 0.5721, 0.7261, 0.5799, 0.5644, 0.6148, + 0.6433, 0.6579, 0.5728, 0.6637, 0.4920, 0.5348, 0.6054, 0.6967, 0.6347, + 0.6901, 0.5789, 0.5934, 0.6175, 0.5708, 0.5831, 0.5093, 0.5488, 0.6062, + 0.5265, 0.4917, 0.5884, 0.5979, 0.5923, 0.6651, 0.5131, 0.5750, 0.5136, + 0.5866, 0.6611, 0.6533, 0.5531, 0.6480, 0.5213, 0.5444, 0.4909, 0.6320, + 0.6691, 0.6666, 0.5485, 0.5622, 0.6017, 0.6123, 0.5685, 0.6451, 0.6198, + 0.6232, 0.6251, 0.5639, 0.6279, 0.6657, 0.6848, 0.5567, 0.5818, 0.5247, + 0.5614, 0.6785, 0.5420, 0.6355, 0.5918, 0.5717, 0.5690, 0.6593, 0.5990, + 0.6394, 0.5861, 0.5881, 0.6242, 0.5771, 0.6306, 0.6191, 0.6725, 0.6106, + 0.5701, 0.5729, 0.6371, 0.5420, 0.5653, 0.4356, 0.6391, 0.5974, 0.6171, + 0.4873, 0.5479, 0.6097, 0.5093, 0.5626, 0.5206, 0.5711, 0.6823, 0.4830, 
+ 0.6145, 0.5612, 0.5915, 0.6442, 0.6279, 0.5503, 0.5646, 0.5565, 0.6741, + 0.4962, 0.6118, 0.5162, 0.5619, 0.6170, 0.5969, 0.5109, 0.5657, 0.6046, + 0.5199, 0.5501, 0.5052, 0.5363, 0.5820, 0.5483, 0.5127, 0.5824, 0.6109, + 0.6180, 0.5710, 0.5554, 0.7066, 0.5692, 0.7009, 0.6320, 0.6670, 0.5894, + 0.6487, 0.6074, 0.5676, 0.4965], device='cuda:0')), ('module.conv8.1.bias', tensor([-2.4603e-01, -2.0420e-01, -2.3926e-01, -1.8113e-01, 3.6277e-03, + -3.4310e-02, -4.5211e-02, -1.3239e-01, -3.1027e-01, -2.1496e-01, + -1.8401e-01, -1.6175e-01, -1.6365e-01, -1.8879e-01, -2.0187e-01, + -2.2995e-01, -1.9215e-01, 1.7450e-02, -1.4591e-01, -9.0315e-03, + -6.4923e-02, -1.9039e-02, -6.7852e-02, -1.6543e-01, -5.1770e-01, + -2.6707e-01, -5.8962e-02, -1.8673e-01, -8.9573e-02, -8.6647e-02, + -9.3134e-03, -1.1019e-01, -4.8615e-02, 1.3133e-02, -2.2098e-01, + -1.2943e-01, -1.5607e-01, -3.7629e-02, -2.3344e-01, -2.0304e-01, + -9.4395e-02, -3.2505e-02, -9.0034e-02, -2.5953e-01, -3.2737e-01, + -2.5741e-01, -6.1232e-02, 3.1437e-03, 1.3686e-02, -1.5474e-01, + -1.4603e-01, -7.8823e-02, -2.3128e-01, -1.9212e-02, -5.2570e-01, + -1.2511e-02, -1.4340e-01, 1.1813e-02, 9.9758e-04, -1.9165e-01, + -3.2000e-01, 4.9402e-02, -1.5039e-01, -1.2290e-01, -1.0617e-01, + -4.6460e-01, 1.0833e-02, -2.1422e-01, -2.6868e-01, -1.3662e-01, + -6.2994e-03, -2.9373e-01, -2.4041e-01, -3.6354e-01, -2.7847e-01, + -1.8632e-01, -2.4994e-01, -1.2377e-02, -1.7033e-01, -1.0021e-01, + -6.9008e-02, -4.1822e-01, -4.7919e-04, -1.1401e-01, -9.0716e-03, + -1.8538e-01, -9.9036e-03, -6.5337e-02, -1.0455e-01, -2.3924e-01, + -2.0551e-01, 1.7212e-02, -2.5079e-01, 2.5922e-02, -1.8929e-01, + -3.2034e-02, -2.0613e-01, -3.7895e-01, -5.2031e-02, -1.0013e-01, + -9.6603e-02, -1.0561e-01, -1.2527e-01, -3.1434e-02, -3.8905e-01, + -1.4895e-01, -1.1154e-01, -1.0705e-01, -1.2938e-01, -5.0385e-03, + -1.1143e-01, -9.2698e-02, -1.1573e-01, -1.6283e-01, 2.4557e-02, + -3.4486e-01, -4.5515e-01, -2.3260e-01, -1.4217e-01, -2.0953e-01, + -2.5524e-01, 
-4.7827e-01, -1.2062e-01, -1.4182e-01, -3.8212e-02, + -1.2130e-01, 1.2211e-02, -1.3204e-01, 1.1787e-02, -2.8765e-01, + -1.0789e-01, -7.5041e-02, -1.9882e-01, 1.9179e-02, -1.8313e-01, + -1.2570e-01, -4.3861e-02, -1.4641e-01, -2.7835e-01, -1.2664e-01, + 3.1255e-02, -2.2323e-01, 5.9589e-02, -3.2981e-02, -8.7146e-02, + -1.5633e-02, -2.4376e-01, -1.6883e-01, -3.1566e-02, -1.0349e-01, + -5.2299e-01, -1.4859e-01, -4.2795e-01, -3.1998e-03, 2.6797e-03, + -1.8901e-01, -3.8608e-02, 2.6671e-02, -1.0715e-02, -2.2606e-01, + -5.4738e-01, -1.1043e-01, -7.5873e-02, -1.1971e-01, -5.6222e-02, + 1.6415e-02, -2.0531e-01, -1.8289e-01, -2.2948e-01, 3.4413e-03, + -1.9205e-03, -2.1054e-01, -1.8685e-01, -3.4275e-01, -1.8347e-01, + -1.3209e-01, -1.1491e-01, -5.9706e-02, -2.8571e-01, 2.4211e-03, + -2.6390e-01, -1.6340e-01, -2.9284e-01, -1.1333e-01, -1.1147e-01, + -2.0640e-01, -1.0543e-01, -1.1463e-01, -4.9580e-01, -2.6341e-03, + -3.7357e-02, -2.0058e-02, -7.4550e-02, -2.1391e-01, -1.3184e-01, + -1.4010e-01, -1.5889e-01, -3.5747e-01, -4.4569e-01, -3.3433e-01, + -1.4844e-01, -6.5637e-02, -1.7807e-03, 2.9938e-02, -1.7752e-01, + -1.8577e-01, -5.2774e-02, -4.4365e-01, -1.5993e-03, -2.5719e-01, + 8.6095e-03, -1.8116e-01, 9.6789e-03, -6.1000e-02, -6.5450e-02, + -8.2867e-02, -1.5450e-01, -5.1060e-03, -4.4363e-01, -2.9718e-01, + -1.0969e-01, 3.3728e-02, 1.0786e-02, 3.0184e-02, -7.2835e-02, + -2.5034e-01, -2.2120e-01, -2.9737e-02, -1.2910e-01, -2.3450e-01, + -1.5038e-01, -2.2467e-02, -2.3203e-01, -4.2806e-01, -1.4618e-01, + 8.2870e-02, 3.8532e-02, -1.4712e-02, -5.2360e-02, -2.1458e-01, + -1.8515e-01, -9.4737e-02, -2.1847e-01, -1.4109e-01, -2.9911e-02, + -2.3604e-01, -1.5992e-01, -1.9666e-01, -1.5877e-01, -1.6443e-01, + -2.1488e-01, -2.3350e-01, 2.9479e-02, -3.1726e-01, 4.1851e-02, + -2.6533e-01], device='cuda:0')), ('module.conv8.1.running_mean', tensor([-2.0283e+00, 4.0840e-02, 3.5831e-03, -6.7090e-02, 5.9631e-01, + -4.8755e-01, -9.9364e-01, -8.5954e-03, 2.9753e-01, -5.4256e-01, + -4.1445e-01, 
-9.8848e-01, -9.7389e-02, -5.0073e-01, -4.5812e-01, + -6.7543e-01, -8.2462e-01, -1.2276e+00, 1.2158e-01, -6.1915e-01, + 4.7943e-01, -7.9419e-01, 2.5277e-01, -5.8727e-01, -8.2322e-01, + -5.7813e-01, 2.0851e-01, -8.3000e-01, -1.4634e+00, -8.6428e-02, + -9.0040e-01, -1.2791e+00, -1.8846e+00, -5.8082e-01, -5.0797e-01, + -1.1480e+00, 2.5509e-01, -5.7088e-01, -6.3888e-01, -1.6245e+00, + 6.8897e-01, 2.0542e+00, 4.1608e-01, 4.0017e-02, -4.8262e-01, + 1.7681e-01, -1.5150e+00, -5.6165e-01, 1.1971e-01, -4.9809e-01, + -2.7745e-01, -4.5986e-02, 9.1302e-01, 7.3921e-01, 1.1986e-01, + 5.7101e-01, 8.3916e-01, -7.2217e-01, -8.8311e-02, 8.9938e-01, + -8.5279e-01, -9.8137e-01, -1.2328e+00, -2.1072e-02, 1.3524e-01, + -8.6072e-01, 7.3041e-01, 1.9557e-01, -1.7824e+00, -4.7073e-01, + -2.0247e-01, -2.0033e+00, -3.9386e-01, -2.1243e+00, -6.5067e-01, + -7.1110e-01, -1.4061e-01, 2.2077e+00, -4.1148e-01, -5.9354e-01, + -1.7983e-01, -3.1042e-02, -4.5746e-01, -6.5795e-01, 4.1195e-01, + 1.1041e+00, 7.0968e-01, 5.2546e-02, 1.7824e-01, -1.3639e+00, + 2.2819e-01, 1.5307e+00, -4.4448e-01, 2.5587e-01, -5.7763e-01, + -9.7971e-01, 7.9011e-01, -7.9087e-01, 1.0300e+00, -1.8775e+00, + 7.8660e-01, 7.5173e-01, 1.0607e-01, 3.7644e-01, -3.3142e-01, + -1.2753e+00, 5.8203e-01, 2.6286e-01, -6.5150e-01, -1.5330e+00, + -9.6321e-01, -6.4370e-01, 4.2368e-01, 4.9135e-01, -9.7401e-01, + -3.6962e-01, -9.7116e-01, -5.9903e-01, -2.5891e-01, -9.5421e-01, + -1.1445e+00, 9.6036e-02, 1.1855e-01, -1.3633e+00, 5.1252e-03, + 3.7890e-02, 1.3806e+00, -3.6376e-01, -1.0937e-01, -1.1760e-01, + -2.5344e-01, -3.7274e-01, -1.4159e-01, 2.3785e-01, 2.0710e-01, + -8.8444e-01, 3.0720e-01, -6.4719e-01, 5.4683e-01, -1.8802e-01, + 4.3419e-01, -8.1701e-01, 4.0321e-02, 7.1071e-01, -1.8930e+00, + 1.0860e+00, 1.0797e+00, -1.5746e-01, 6.8257e-01, -7.0189e-02, + -1.1446e+00, -5.0258e-01, -2.9839e-01, -2.4193e-01, -7.6033e-01, + -1.3785e+00, -1.0725e+00, 4.0829e-01, -1.6946e+00, 4.4058e-01, + -5.8764e-01, -2.0746e-01, 6.0239e-01, 7.3826e-01, 
1.0900e+00, + -7.1778e-02, 2.8387e-01, -7.5113e-01, 2.7873e-01, 3.2644e-01, + -1.3364e+00, 1.0656e+00, -5.9139e-04, -2.8503e-01, -6.4286e-01, + -6.1208e-02, 1.0951e+00, -3.6838e-01, -2.1318e-01, 1.8973e-01, + -2.0989e-01, -2.3763e-01, -1.0747e+00, -1.9353e+00, 8.7215e-01, + 1.0256e+00, -1.7362e-01, 7.3548e-01, 1.4111e-01, -5.5416e-01, + 2.4390e-01, 6.4423e-02, -2.1135e-01, -9.8368e-01, -2.9405e-01, + -1.6668e+00, 4.6341e-01, 1.2250e-01, -5.5501e-01, -4.7055e-01, + 2.1411e-01, -3.3290e-01, -1.1014e+00, -5.0471e-01, 2.8561e-02, + 7.3296e-01, 4.8726e-01, -7.1554e-01, -3.3608e-01, 2.3547e-02, + 3.0119e-01, 1.4341e+00, 1.3812e+00, 4.6313e-01, -4.3285e-01, + -1.3715e+00, -8.1076e-01, 5.3997e-01, 2.6896e-01, -4.9527e-01, + 6.2146e-01, 1.7119e-01, 1.2711e+00, 1.5919e-01, -8.3132e-02, + -4.0712e-01, -4.5849e-01, 1.1198e+00, -6.1305e-01, 3.2838e-01, + -9.5643e-01, 6.9190e-01, -5.2347e-01, -7.4339e-01, -4.6691e-02, + 4.7998e-01, 7.6136e-01, 5.9913e-01, -1.0608e+00, -4.2126e-01, + 1.0778e+00, -9.3157e-02, -8.1845e-01, 1.4165e+00, 8.8506e-02, + 1.1951e+00, -4.6053e-01, -2.9598e-01, -1.0353e+00, -1.5638e-01, + -1.8602e+00, -5.0030e-01, 5.7830e-01, -8.2748e-01, 2.0043e+00, + 5.3103e-01], device='cuda:0')), ('module.conv8.1.running_var', tensor([0.0531, 0.0432, 0.0521, 0.0578, 0.0544, 0.1228, 0.0397, 0.0574, 0.0540, + 0.0353, 0.0486, 0.0607, 0.0464, 0.0311, 0.0592, 0.0557, 0.0396, 0.1498, + 0.0525, 0.0540, 0.0617, 0.0512, 0.0305, 0.0860, 0.0515, 0.0524, 0.0268, + 0.0376, 0.0653, 0.0307, 0.0265, 0.0631, 0.0277, 0.0501, 0.0538, 0.0192, + 0.0615, 0.0782, 0.0537, 0.0363, 0.0451, 0.0399, 0.0557, 0.0400, 0.0294, + 0.0567, 0.0579, 0.1004, 0.0529, 0.0451, 0.0497, 0.0233, 0.0189, 0.0488, + 0.0291, 0.0481, 0.0389, 0.0589, 0.1375, 0.0415, 0.0612, 0.0822, 0.0428, + 0.0750, 0.0676, 0.0271, 0.0852, 0.0576, 0.0364, 0.0334, 0.0775, 0.0351, + 0.0446, 0.0337, 0.0268, 0.0629, 0.0577, 0.0316, 0.0426, 0.0366, 0.0229, + 0.0472, 0.0377, 0.0285, 0.0340, 0.0441, 0.0464, 0.0681, 0.0387, 0.0529, + 0.0468, 
0.0719, 0.0278, 0.0578, 0.0831, 0.0804, 0.0478, 0.0312, 0.0413, + 0.0524, 0.0464, 0.0389, 0.0563, 0.0600, 0.0591, 0.0623, 0.0295, 0.0622, + 0.0871, 0.0434, 0.0277, 0.0362, 0.0418, 0.0544, 0.0648, 0.0218, 0.0488, + 0.0461, 0.0447, 0.0559, 0.0497, 0.0410, 0.0552, 0.0600, 0.0467, 0.0510, + 0.0381, 0.0729, 0.0492, 0.0515, 0.0356, 0.0420, 0.0549, 0.0699, 0.0372, + 0.0558, 0.0743, 0.0211, 0.0498, 0.0356, 0.0646, 0.0355, 0.0637, 0.0411, + 0.0594, 0.0658, 0.0510, 0.0407, 0.0505, 0.0620, 0.0300, 0.0388, 0.0370, + 0.0841, 0.0858, 0.0373, 0.0341, 0.0533, 0.0586, 0.0344, 0.0270, 0.0442, + 0.0468, 0.0625, 0.0669, 0.0506, 0.0471, 0.0544, 0.0556, 0.0486, 0.0648, + 0.0298, 0.0470, 0.0516, 0.0375, 0.0649, 0.0427, 0.0288, 0.0229, 0.0337, + 0.0414, 0.0376, 0.0232, 0.0776, 0.0408, 0.0398, 0.0542, 0.0589, 0.0580, + 0.0441, 0.0236, 0.0538, 0.0249, 0.0447, 0.0541, 0.0801, 0.0758, 0.0473, + 0.0350, 0.0553, 0.0460, 0.0670, 0.0355, 0.0767, 0.0479, 0.0364, 0.0422, + 0.0298, 0.0358, 0.0430, 0.0960, 0.0486, 0.1065, 0.0305, 0.0647, 0.0649, + 0.0557, 0.0524, 0.0365, 0.0608, 0.0745, 0.0569, 0.0722, 0.0612, 0.0521, + 0.0402, 0.0585, 0.0555, 0.0645, 0.0441, 0.0250, 0.0323, 0.0500, 0.0489, + 0.0326, 0.0645, 0.0778, 0.0453, 0.0312, 0.0514, 0.0353, 0.0339, 0.0518, + 0.0480, 0.0332, 0.0429, 0.0569, 0.0477, 0.0427, 0.0768, 0.0333, 0.0448, + 0.0502, 0.0633, 0.0474, 0.0440], device='cuda:0')), ('module.conv8.1.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.conv9.0.weight', tensor([[[ 0.0157], + [ 0.0208], + [ 0.0128], + ..., + [ 0.0036], + [-0.0138], + [ 0.0306]], + + [[ 0.0401], + [-0.0284], + [ 0.0182], + ..., + [ 0.0192], + [ 0.0065], + [-0.0014]], + + [[ 0.0419], + [-0.0522], + [ 0.0224], + ..., + [ 0.0181], + [-0.0057], + [ 0.0291]], + + ..., + + [[-0.0101], + [ 0.0231], + [ 0.0298], + ..., + [ 0.0387], + [-0.0046], + [-0.0334]], + + [[ 0.0010], + [ 0.0016], + [ 0.0080], + ..., + [ 0.0252], + [ 0.0029], + [-0.0036]], + + [[-0.0009], + [ 0.0021], + [-0.0263], + ..., + [ 0.0523], + [ 
0.0146], + [-0.0106]]], device='cuda:0')), ('module.conv9.1.weight', tensor([0.6226, 0.5779, 0.7111, 0.6101, 0.7459, 0.7202, 0.6411, 0.5684, 0.7304, + 0.5505, 0.5243, 0.6461, 0.6926, 0.6356, 0.5848, 0.6103, 0.7593, 0.6569, + 0.5361, 0.5942, 0.5781, 0.5581, 0.5624, 0.5988, 0.4425, 0.5700, 0.6438, + 0.6012, 0.5232, 0.6162, 0.6568, 0.5419, 0.6366, 0.6227, 0.4681, 0.6104, + 0.6165, 0.5403, 0.7192, 0.6316, 0.5093, 0.7578, 0.7485, 0.5667, 0.6746, + 0.6822, 0.5845, 0.6114, 0.6166, 0.7158, 0.5597, 0.5891, 0.6213, 0.5997, + 0.5359, 0.5449, 0.6071, 0.6615, 0.6150, 0.5765, 0.5491, 0.6936, 0.6118, + 0.6273, 0.6168, 0.5931, 0.7683, 0.7403, 0.5190, 0.6243, 0.7413, 0.6641, + 0.5093, 0.5727, 0.5620, 0.5876, 0.6844, 0.6052, 0.5821, 0.5480, 0.5626, + 0.5544, 0.6318, 0.6014, 0.7053, 0.7174, 0.5355, 0.5201, 0.5152, 0.5391, + 0.6946, 0.5794, 0.5174, 0.5422, 0.6392, 0.6332, 0.5782, 0.7359, 0.6275, + 0.5356, 0.6401, 0.5545, 0.5097, 0.5167, 0.6474, 0.7017, 0.5151, 0.6150, + 0.5057, 0.7647, 0.5022, 0.5596, 0.6011, 0.5422, 0.5353, 0.5854, 0.5972, + 0.6718, 0.6263, 0.5183, 0.5773, 0.6467, 0.5548, 0.6688, 0.5383, 0.7225, + 0.5821, 0.8546, 0.6255, 0.6119, 0.7325, 0.5714, 0.6477, 0.4745, 0.7224, + 0.6924, 0.6096, 0.6536, 0.5273, 0.7535, 0.6486, 0.4737, 0.6451, 0.5303, + 0.7275, 0.6395, 0.5261, 0.4678, 0.6351, 0.5788, 0.6072, 0.7128, 0.6788, + 0.5757, 0.5632, 0.7167, 0.6581, 0.6026, 0.5152, 0.6542, 0.5238, 0.5934, + 0.5098, 0.5474, 0.5955, 0.5794, 0.5691, 0.6741, 0.7431, 0.5605, 0.7425, + 0.4771, 0.6316, 0.5784, 0.6175, 0.5925, 0.7595, 0.4885, 0.6532, 0.7380, + 0.5513, 0.7149, 0.6233, 0.6725, 0.5737, 0.6078, 0.5994, 0.7083, 0.6195, + 0.5393, 0.5609, 0.8236, 0.5870, 0.5723, 0.5348, 0.5125, 0.6559, 0.7676, + 0.5725, 0.5361, 0.6094, 0.5743, 0.5731, 0.4321, 0.5596, 0.5669, 0.5459, + 0.6645, 0.5306, 0.5926, 0.5306, 0.5523, 0.5735, 0.6918, 0.5582, 0.5910, + 0.6099, 0.5703, 0.5421, 0.4958, 0.5488, 0.7844, 0.6439, 0.7709, 0.6108, + 0.5091, 0.7249, 0.7102, 0.5906, 0.5033, 0.6028, 0.5491, 0.6216, 0.6770, 
+ 0.6140, 0.5667, 0.6594, 0.6803, 0.5505, 0.6597, 0.6188, 0.6077, 0.5960, + 0.5762, 0.5890, 0.5766, 0.7170, 0.6740, 0.7188, 0.7555, 0.5970, 0.5619, + 0.6517, 0.7393, 0.5757, 0.5051], device='cuda:0')), ('module.conv9.1.bias', tensor([-0.2170, -0.0310, -0.0988, 0.0487, -0.0312, -0.0130, -0.0537, -0.0426, + 0.0014, -0.0416, -0.0396, -0.0725, -0.0075, -0.0937, -0.1644, -0.0798, + -0.1929, 0.0067, -0.0787, 0.0544, -0.0484, -0.1948, -0.0049, 0.0090, + -0.1226, -0.0614, -0.0773, -0.0869, 0.0276, -0.0051, -0.1750, -0.1782, + 0.0702, 0.0293, 0.0192, -0.0831, -0.0519, -0.0622, -0.0310, 0.0473, + 0.0115, -0.0072, -0.0604, 0.0569, 0.0387, -0.0205, -0.0636, 0.0378, + -0.1546, -0.0526, -0.1494, 0.0343, -0.0199, 0.0012, 0.0051, -0.0014, + -0.1953, 0.0316, -0.1846, 0.0519, -0.0836, -0.0976, -0.0880, 0.1005, + -0.0331, -0.1253, 0.0254, 0.0263, -0.1414, -0.0912, -0.0212, -0.0592, + -0.3242, -0.0332, -0.0921, -0.3251, -0.0107, -0.2413, -0.0921, -0.0573, + -0.0654, -0.2153, 0.0033, -0.2458, 0.0437, -0.1673, 0.0677, -0.2057, + 0.0821, -0.0131, 0.0227, -0.1236, -0.2385, -0.0143, -0.0123, -0.0346, + -0.0052, -0.1061, 0.0841, -0.0014, 0.0377, -0.2477, -0.0790, -0.1276, + -0.0130, 0.0166, -0.3100, -0.0152, -0.1520, -0.0073, 0.0600, -0.2974, + 0.0602, -0.0320, -0.0776, -0.1355, 0.0946, -0.0443, -0.0232, -0.3531, + 0.0545, 0.0883, -0.0968, -0.1333, -0.3653, 0.0034, 0.0171, 0.0065, + -0.0617, 0.0243, -0.0501, -0.2803, -0.2336, 0.0566, -0.0109, -0.0022, + 0.0525, -0.1713, -0.2902, 0.0135, -0.2197, 0.0879, 0.0112, -0.0573, + -0.1882, 0.0200, -0.1664, -0.0685, 0.0494, 0.0616, 0.0487, 0.0080, + 0.0259, 0.0402, -0.1211, -0.0805, -0.1758, 0.0028, -0.2267, -0.2386, + -0.0307, -0.0068, -0.1553, -0.0744, 0.0674, -0.1869, -0.0832, 0.0753, + 0.0245, -0.0422, -0.0455, -0.1663, 0.0308, -0.0209, -0.2745, -0.2819, + -0.0409, -0.0598, 0.0402, -0.0241, 0.0534, 0.0685, -0.2451, 0.0352, + -0.1865, -0.0997, 0.1086, -0.1171, -0.0567, -0.0142, -0.1399, 0.0151, + -0.1117, -0.0816, -0.3181, -0.0265, 0.0528, 0.0076, 
-0.1335, -0.0827, + 0.0046, -0.0197, 0.0757, -0.0917, 0.0098, 0.0395, -0.0686, -0.0401, + -0.0138, 0.0804, 0.0633, -0.2460, -0.0473, 0.0353, -0.0249, -0.0243, + 0.0711, 0.0348, 0.0430, -0.1605, -0.0386, -0.0667, -0.2364, 0.0509, + -0.2598, -0.2726, 0.0156, -0.0554, -0.0502, -0.1221, 0.0201, -0.0576, + 0.0900, -0.1347, 0.0304, -0.0324, -0.1809, -0.1486, 0.0712, -0.0160, + -0.1753, -0.0975, -0.0247, 0.0352, -0.0621, 0.0609, -0.0325, 0.0135, + 0.0200, 0.0407, -0.2593, -0.0471, -0.2215, 0.0491, 0.0535, -0.2491], + device='cuda:0')), ('module.conv9.1.running_mean', tensor([-3.4690e-01, -3.1970e-02, -1.2713e-01, 7.8237e-02, -2.1373e-02, + -5.7925e-02, 1.2020e-03, -1.5022e-01, -8.9048e-02, -1.0673e-02, + -1.4895e-01, -9.6178e-02, -3.6889e-02, -7.7319e-02, -3.4310e-01, + -1.2211e-01, -2.3033e-01, -1.3725e-01, -1.6090e-01, 1.8006e-02, + -9.4403e-02, -1.8156e-01, -1.3622e-01, -5.9155e-02, -2.3709e-01, + -1.2678e-01, -1.4457e-03, -6.3109e-02, -1.2476e-02, 2.9832e-02, + -3.7519e-01, -3.6943e-01, 6.4961e-03, 1.0919e-02, -1.8239e-01, + -1.3789e-01, -2.5692e-01, -1.8134e-01, -8.6468e-02, -1.8330e-02, + -1.2256e-01, 1.1145e-01, -1.3317e-01, -4.4275e-02, -1.0242e-01, + -1.6042e-01, -1.2620e-01, -8.1089e-03, -2.7272e-01, -2.0920e-01, + -3.7624e-01, -5.3083e-02, -7.9922e-02, -2.4579e-01, -4.1349e-02, + -1.8715e-02, -4.6828e-01, -3.6632e-02, -3.1954e-01, -4.5855e-02, + -2.7011e-01, -2.5727e-01, -9.7844e-02, 2.4020e-01, -8.8557e-02, + -3.5869e-01, 1.3651e-01, -1.1983e-02, -2.7594e-01, -2.3819e-01, + -6.1687e-02, -2.3113e-01, -5.0003e-01, -2.2050e-01, -6.8881e-02, + -4.6418e-01, -9.7993e-02, -5.0303e-01, -1.0902e-01, -1.1366e-01, + -1.1766e-01, -4.4700e-01, -5.7806e-02, -3.4928e-01, -6.2481e-03, + -2.0928e-01, 1.7405e-01, -3.4542e-01, 1.0241e-01, -3.7274e-02, + -6.1344e-02, -1.9960e-01, -4.2360e-01, -1.0810e-01, -1.0010e-01, + -1.2489e-01, -5.3978e-02, -2.2705e-01, 1.6446e-02, -2.5721e-02, + 6.0314e-03, -3.6459e-01, -1.8675e-01, -2.8506e-01, 3.6109e-03, + -1.1740e-01, -4.9686e-01, 
-1.3097e-01, -3.5433e-01, -6.1953e-02, + 5.9330e-03, -4.9691e-01, 1.0302e-01, -1.0784e-01, -1.5385e-01, + -2.3930e-01, -7.6253e-02, -1.7073e-01, -4.2903e-02, -5.9578e-01, + -2.3033e-02, 7.3991e-02, -1.5132e-01, -2.2875e-01, -5.7464e-01, + 1.2045e-02, 1.5259e-01, 2.2698e-02, -1.2321e-02, 2.8336e-02, + -7.0975e-02, -4.7671e-01, -3.2868e-01, -1.4916e-02, 1.2285e-01, + 7.6774e-02, 1.4494e-01, -4.1345e-01, -4.6182e-01, -4.8463e-04, + -3.6147e-01, -3.8726e-02, -9.4645e-02, -9.8972e-02, -3.3224e-01, + 6.6328e-02, -3.8574e-01, -6.5025e-02, -1.9355e-02, -4.1928e-02, + -2.7942e-02, -1.3675e-01, -9.0207e-02, 1.6240e-01, -2.1510e-01, + -2.5985e-01, -3.0287e-01, -1.0513e-01, -3.8030e-01, -3.6728e-01, + -1.1745e-01, -5.0993e-02, -3.7855e-01, -9.0005e-02, 1.0385e-01, + -2.4488e-01, -1.3680e-01, 8.1523e-03, 4.2134e-03, -4.7236e-02, + -1.3577e-01, -3.4590e-01, 1.7077e-01, -9.6146e-02, -4.1343e-01, + -4.8108e-01, -3.5540e-02, -7.4613e-02, 1.1850e-02, -1.4102e-01, + 5.6265e-02, 8.6475e-02, -3.0433e-01, -1.0524e-01, -3.2874e-01, + -2.1750e-01, -5.2179e-02, -1.4227e-01, -2.2266e-01, -7.4450e-02, + -1.3168e-01, 6.0100e-02, -3.6141e-01, -2.0720e-01, -5.6104e-01, + -2.5403e-02, -6.4887e-02, 8.2603e-03, -1.4174e-01, -2.6638e-01, + -4.4565e-02, -9.0775e-02, 2.8629e-04, -8.0418e-02, -3.3829e-02, + 2.2968e-02, -7.5467e-02, -7.3682e-02, -1.1375e-02, 5.0907e-02, + 2.9746e-02, -5.6768e-01, -5.5499e-02, 2.7643e-02, -1.0926e-01, + -8.0734e-02, -2.0403e-02, 6.3541e-02, -3.6691e-02, -3.6280e-01, + -1.3140e-01, -9.2061e-02, -3.5287e-01, -4.2136e-02, -5.3335e-01, + -4.7398e-01, -5.2177e-02, -1.9104e-02, 4.3454e-02, -1.7929e-01, + -7.1999e-02, -7.9322e-02, 4.1897e-02, -2.1090e-01, 1.0160e-01, + -5.6137e-02, -3.5983e-01, -2.9724e-01, 8.7154e-03, -2.2268e-02, + -3.2232e-01, -1.1682e-01, -9.0703e-02, -6.0229e-02, -4.9165e-02, + -4.4359e-03, -1.1074e-01, -4.2587e-03, -8.4900e-02, -6.3527e-02, + -5.1124e-01, -7.7233e-02, -3.4528e-01, 7.0478e-02, -8.7243e-02, + -1.9804e-01], device='cuda:0')), 
('module.conv9.1.running_var', tensor([0.1899, 0.1113, 0.0920, 0.1232, 0.3023, 0.1324, 0.3175, 0.1891, 0.1293, + 0.1455, 0.1242, 0.1585, 0.2575, 0.0841, 0.0880, 0.1728, 0.3042, 0.1273, + 0.1115, 0.1099, 0.1763, 0.1694, 0.1699, 0.2841, 0.0747, 0.1207, 0.1776, + 0.1846, 0.0553, 0.2883, 0.2296, 0.1603, 0.1294, 0.1453, 0.1052, 0.1480, + 0.0919, 0.2332, 0.1185, 0.2944, 0.1343, 0.1549, 0.1048, 0.2091, 0.1598, + 0.1182, 0.2430, 0.2449, 0.2083, 0.0908, 0.0655, 0.1696, 0.2503, 0.1136, + 0.0905, 0.1073, 0.1995, 0.1595, 0.1302, 0.2162, 0.0717, 0.0868, 0.0797, + 0.1406, 0.1039, 0.1326, 0.1950, 0.1846, 0.0797, 0.1382, 0.1369, 0.1290, + 0.1097, 0.0987, 0.1989, 0.1687, 0.1737, 0.1114, 0.1545, 0.2358, 0.1121, + 0.1554, 0.2403, 0.2232, 0.1902, 0.2537, 0.1350, 0.1975, 0.1062, 0.2059, + 0.1318, 0.1179, 0.1242, 0.1484, 0.1743, 0.2062, 0.1420, 0.2236, 0.1886, + 0.1294, 0.2888, 0.1803, 0.0544, 0.1089, 0.2478, 0.1400, 0.0913, 0.1284, + 0.0938, 0.1643, 0.1373, 0.1081, 0.1212, 0.1176, 0.1589, 0.0802, 0.1343, + 0.1485, 0.1413, 0.1245, 0.0903, 0.2746, 0.1165, 0.1190, 0.1228, 0.2268, + 0.0778, 0.2722, 0.2508, 0.1219, 0.1212, 0.1587, 0.2205, 0.2382, 0.1755, + 0.1493, 0.1811, 0.1031, 0.1094, 0.2810, 0.1863, 0.0612, 0.2947, 0.1417, + 0.2432, 0.2634, 0.0868, 0.1230, 0.1974, 0.2540, 0.2218, 0.2026, 0.1112, + 0.0848, 0.0982, 0.1768, 0.1584, 0.1095, 0.0750, 0.2081, 0.1977, 0.1275, + 0.0961, 0.0773, 0.1142, 0.1429, 0.1599, 0.2117, 0.1111, 0.1273, 0.1755, + 0.0804, 0.0966, 0.1560, 0.1835, 0.1723, 0.1644, 0.2175, 0.1650, 0.2210, + 0.1236, 0.1582, 0.2410, 0.1201, 0.0893, 0.1952, 0.2001, 0.3300, 0.1487, + 0.1550, 0.1974, 0.2892, 0.0904, 0.1494, 0.1432, 0.1586, 0.2218, 0.2921, + 0.1035, 0.0743, 0.2720, 0.0967, 0.0682, 0.1022, 0.2986, 0.1707, 0.0708, + 0.1512, 0.1431, 0.1295, 0.0961, 0.1390, 0.1940, 0.1797, 0.1289, 0.1079, + 0.1925, 0.0919, 0.2591, 0.1006, 0.2292, 0.2350, 0.2042, 0.2713, 0.1428, + 0.1183, 0.1672, 0.2736, 0.1628, 0.0901, 0.0923, 0.1417, 0.2522, 0.1513, + 0.1823, 0.1416, 0.0822, 0.1877, 
0.2366, 0.1861, 0.2000, 0.1462, 0.1716, + 0.1434, 0.1858, 0.1693, 0.1788, 0.3953, 0.1661, 0.1958, 0.1266, 0.0838, + 0.1994, 0.3833, 0.1797, 0.0676], device='cuda:0')), ('module.conv9.1.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.conv10.0.weight', tensor([[[ 0.0212], + [ 0.0209], + [-0.0539], + ..., + [ 0.0507], + [ 0.0192], + [ 0.0019]], + + [[ 0.0045], + [ 0.0064], + [-0.0047], + ..., + [-0.0147], + [-0.0039], + [ 0.0077]], + + [[ 0.0263], + [ 0.0256], + [-0.0288], + ..., + [ 0.0398], + [-0.0643], + [ 0.0003]], + + ..., + + [[-0.0166], + [-0.0078], + [-0.0145], + ..., + [ 0.0200], + [ 0.0124], + [ 0.0017]], + + [[-0.0141], + [-0.0107], + [-0.0181], + ..., + [-0.0191], + [ 0.0015], + [-0.0047]], + + [[ 0.0014], + [ 0.0371], + [ 0.0028], + ..., + [ 0.0419], + [-0.0186], + [ 0.0209]]], device='cuda:0')), ('module.conv10.1.weight', tensor([0.8015, 0.2252, 0.9373, 0.9009, 0.8371, 0.9301, 0.4780, 0.6508, 0.5210, + 0.9029, 0.9498, 0.7184, 0.3249, 0.3362, 0.9156, 0.9676, 0.9653, 0.8013, + 0.9641, 0.9074, 0.7074, 0.7666, 0.7584, 0.8730, 0.2298, 0.9501, 0.2364, + 0.0741, 0.9448, 0.8492, 0.9594, 0.8277, 0.4666, 0.9187, 0.8561, 0.9430, + 0.8778, 0.7370, 0.7970, 0.9080, 0.6319, 0.9229, 0.9372, 0.1750, 0.4493, + 0.8366, 0.6683, 0.2292, 0.3172, 0.9359, 0.9505, 0.8996, 0.3965, 0.8629, + 0.8986, 0.9294, 0.8499, 0.7422, 0.3883, 0.9576, 0.7270, 0.8534, 0.9350, + 0.8158, 0.9616, 0.9568, 0.9277, 0.8707, 0.9401, 0.2764, 0.5540, 0.7615, + 0.9572, 0.5237, 0.9488, 0.6275, 0.8726, 0.6882, 0.9384, 0.9068, 0.7133, + 0.9206, 0.8501, 0.8965, 0.8984, 0.9614, 0.4725, 0.9484, 0.9400, 0.9529, + 0.3343, 0.9027, 0.6408, 0.7421, 0.8778, 0.6821, 0.6470, 0.6392, 0.6938, + 0.8965, 0.9144, 0.7558, 0.8165, 0.7853, 0.9597, 0.9158, 0.5280, 0.9324, + 0.7621, 0.5329, 0.2605, 0.9593, 0.5097, 0.8010, 0.9206, 0.7345, 0.7195, + 0.5423, 0.8894, 0.5222, 0.8384, 0.9545, 0.5171, 0.9189, 0.6087, 0.4509, + 0.3672, 0.8354], device='cuda:0')), ('module.conv10.1.bias', tensor([-8.6660e-02, -2.7512e-02, 
1.0634e-02, -1.4279e-02, -3.7932e-02, + 2.2786e-03, -4.8029e-02, 1.4300e-05, -3.4495e-02, -8.5370e-02, + -8.6371e-02, -5.0839e-02, -2.0320e-02, -1.5466e-02, -2.9051e-02, + -7.9343e-03, -7.3030e-02, -3.1973e-02, -8.2262e-02, 2.3977e-02, + -3.0186e-03, -2.9487e-02, -4.9020e-02, -4.5043e-02, -2.3022e-02, + -6.6896e-03, -2.0929e-02, -3.7868e-03, -1.0604e-03, -5.8307e-03, + -4.7679e-02, -3.4500e-02, -2.1458e-02, 1.6146e-02, -1.8226e-02, + -4.9448e-02, -4.5903e-03, -2.3152e-03, 1.7686e-02, 4.2685e-02, + 1.3087e-02, -4.6810e-02, 3.6077e-03, -1.7218e-02, -3.8915e-02, + 2.8525e-02, -9.2412e-03, -1.6305e-02, -1.2359e-02, -6.7783e-03, + -4.5505e-02, 2.9263e-02, -2.1087e-02, -1.0610e-01, -6.5504e-03, + -3.3754e-02, 2.3730e-02, -3.4061e-02, -2.7593e-02, -6.7381e-02, + -2.4079e-02, 2.3773e-03, -8.3001e-02, 1.1731e-03, -7.3067e-02, + -6.7051e-02, -1.0855e-02, -6.8399e-02, -6.9189e-02, -2.0795e-02, + -9.3339e-03, -1.1927e-01, -3.5769e-02, -9.6744e-03, 6.3519e-03, + 4.0807e-03, -4.8269e-02, -5.4872e-03, -2.5825e-02, -4.7765e-03, + -5.8219e-02, -2.9842e-02, -4.2608e-02, -6.9282e-03, -3.0244e-02, + -2.5694e-02, -9.1605e-03, 1.8612e-03, 8.4631e-03, -3.3621e-02, + -1.8484e-02, 1.4802e-02, 8.9858e-03, 1.5266e-02, -7.1313e-03, + -1.0148e-02, -3.6386e-02, -8.8002e-03, -8.6535e-03, -2.0897e-02, + -3.2791e-03, -9.5493e-03, -2.5698e-02, 1.3044e-02, -5.3298e-03, + 1.5053e-03, -2.3083e-02, -6.9719e-02, -3.6337e-02, 4.9225e-03, + -2.0425e-02, -2.2978e-02, -4.3961e-03, -2.9562e-02, -5.4046e-02, + -1.5622e-02, -1.3388e-02, -5.1301e-02, -4.6756e-02, -2.3418e-02, + -1.5255e-02, -9.0177e-03, -6.9495e-02, 1.0543e-02, 6.3831e-03, + -6.0427e-02, -2.2419e-02, 1.8376e-02], device='cuda:0')), ('module.conv10.1.running_mean', tensor([ 0.2301, -0.0858, -0.2921, -0.0457, 0.0209, -0.4680, 0.3066, -0.3436, + 0.2388, 0.5737, -0.3007, 0.2554, -0.0358, -0.0747, -0.2055, -0.2635, + 0.0317, 0.3802, 0.2516, -0.3124, -0.2975, 0.2440, -0.1890, 0.3440, + 0.0074, -0.2376, 0.0031, -0.0044, -0.1872, -0.1042, -0.1907, 
-0.1874, + 0.1850, -0.2955, -0.1855, -0.0908, -0.3136, -0.3080, -0.2629, 0.0856, + -0.2758, 0.2606, -0.1625, -0.0790, 0.2225, -0.4050, -0.1213, -0.0133, + -0.0087, -0.3481, -0.2254, -0.1735, -0.2240, 0.2645, 0.2940, 0.0359, + -0.1327, 0.3415, -0.0921, -0.3031, 0.2253, -0.1436, -0.2576, -0.3225, + 0.2778, -0.0991, -0.1911, 0.0288, 0.0994, -0.0483, 0.0646, 0.3204, + -0.2163, -0.0381, -0.1356, -0.2670, -0.1516, -0.0927, -0.1902, -0.4946, + 0.2121, -0.2799, -0.0722, -0.2272, -0.2151, -0.3749, -0.3035, -0.1858, + 0.3971, -0.4306, -0.0274, -0.4580, -0.1872, -0.3702, 0.2135, -0.2760, + 0.4323, -0.2614, -0.1481, -0.2262, 0.3974, 0.4103, 0.0756, -0.2605, + -0.1395, -0.2482, -0.1717, -0.1674, -0.1779, -0.2963, -0.0107, 0.2392, + -0.3089, 0.3841, 0.5516, -0.2215, 0.1260, 0.1341, -0.0015, -0.1106, + -0.0487, -0.4744, 0.1664, 0.5308, -0.3666, 0.2434, 0.0023, 0.0905], + device='cuda:0')), ('module.conv10.1.running_var', tensor([3.0393e-01, 1.4213e-02, 8.2882e-01, 7.0779e-01, 4.2531e-01, 5.1979e-01, + 1.0894e-01, 1.8011e-01, 8.8379e-02, 4.0628e-01, 9.4501e-01, 1.6457e-01, + 1.0463e-02, 1.5628e-02, 6.5976e-01, 9.9919e-01, 8.3391e-01, 1.8527e-01, + 8.1469e-01, 3.9413e-01, 1.8188e-01, 1.4616e-01, 4.8189e-01, 2.4004e-01, + 1.6527e-02, 7.9543e-01, 1.1487e-02, 2.4170e-04, 7.4977e-01, 3.9868e-01, + 6.3322e-01, 3.3846e-01, 1.6624e-01, 6.3598e-01, 3.1920e-01, 9.8898e-01, + 3.1216e-01, 1.1661e-01, 2.1431e-01, 7.1452e-01, 1.8172e-01, 4.7235e-01, + 7.8197e-01, 4.9825e-03, 1.0185e-01, 3.5355e-01, 8.0131e-02, 2.6818e-03, + 3.7468e-03, 6.6704e-01, 8.1577e-01, 3.9523e-01, 9.4484e-02, 3.6161e-01, + 5.8243e-01, 4.3409e-01, 3.5554e-01, 1.8034e-01, 4.2000e-02, 7.5259e-01, + 1.8168e-01, 5.6277e-01, 7.6217e-01, 2.1920e-01, 8.2623e-01, 1.0546e+00, + 6.3078e-01, 5.5690e-01, 7.7662e-01, 2.0980e-02, 2.2966e-01, 3.0030e-01, + 5.6320e-01, 1.0852e-01, 6.3178e-01, 1.8067e-01, 5.5005e-01, 1.1868e-01, + 9.2327e-01, 3.9416e-01, 2.1718e-01, 5.3000e-01, 2.9804e-01, 4.2417e-01, + 4.4208e-01, 9.4922e-01, 1.0552e-01, 
7.8810e-01, 5.8501e-01, 8.0887e-01, + 1.3617e-02, 4.4939e-01, 1.4311e-01, 2.8314e-01, 5.9850e-01, 1.4934e-01, + 2.3435e-01, 1.4563e-01, 1.4877e-01, 5.1729e-01, 4.8582e-01, 3.1407e-01, + 2.7550e-01, 1.9792e-01, 7.4529e-01, 5.9428e-01, 8.1008e-02, 9.4468e-01, + 1.7417e-01, 8.7544e-02, 1.0872e-02, 6.6777e-01, 9.9943e-02, 2.5464e-01, + 3.9253e-01, 1.8366e-01, 1.7111e-01, 1.4506e-01, 4.4178e-01, 5.2595e-02, + 5.3852e-01, 7.2784e-01, 1.1154e-01, 4.7631e-01, 1.8129e-01, 1.0700e-01, + 1.2552e-02, 3.1532e-01], device='cuda:0')), ('module.conv10.1.num_batches_tracked', tensor(5733, device='cuda:0')), ('module.conv11.weight', tensor([[[-3.5775e-02], + [ 7.5706e-03], + [ 7.0941e-02], + [ 6.4811e-02], + [-3.1488e-02], + [-6.9514e-02], + [ 1.1817e-02], + [ 2.5812e-02], + [ 1.2879e-02], + [-5.6779e-02], + [ 7.1265e-02], + [-1.2221e-02], + [ 8.2420e-04], + [ 1.7873e-03], + [ 5.2704e-02], + [ 9.8671e-02], + [-8.7126e-02], + [ 2.7380e-02], + [-9.5956e-02], + [-5.4914e-02], + [-1.8174e-02], + [-2.0248e-02], + [ 1.9452e-02], + [ 3.9899e-02], + [ 2.9385e-03], + [-7.3399e-02], + [ 1.2576e-03], + [ 9.6630e-05], + [-7.3227e-02], + [ 3.2587e-02], + [-8.5263e-02], + [-3.0058e-02], + [ 1.3597e-02], + [ 6.8711e-02], + [-3.1157e-02], + [ 6.5942e-02], + [-3.3199e-02], + [ 1.5419e-02], + [ 3.8178e-02], + [ 7.5061e-02], + [-2.5401e-02], + [ 5.4692e-02], + [-6.4815e-02], + [ 3.2262e-03], + [ 1.3106e-02], + [-4.4865e-02], + [ 8.3177e-03], + [ 1.1620e-04], + [ 2.7896e-04], + [-6.9742e-02], + [ 8.0971e-02], + [-5.8206e-02], + [ 2.0579e-02], + [-4.4725e-02], + [ 5.5745e-02], + [ 5.2517e-02], + [-4.1656e-02], + [ 2.4406e-02], + [-7.0422e-03], + [ 7.8744e-02], + [-1.7453e-02], + [ 3.0529e-02], + [ 6.4681e-02], + [-2.3166e-02], + [-8.5126e-02], + [ 7.7968e-02], + [-6.0334e-02], + [-4.1762e-02], + [-6.1811e-02], + [ 4.7851e-04], + [ 1.2781e-02], + [-3.1758e-02], + [-8.9824e-02], + [-1.7494e-02], + [-6.9538e-02], + [ 1.3558e-02], + [ 3.1074e-02], + [ 1.7261e-02], + [ 6.9113e-02], + [ 6.0499e-02], + 
[-2.0566e-02], + [ 6.6429e-02], + [-3.0451e-02], + [-5.2167e-02], + [-4.3893e-02], + [ 9.2334e-02], + [ 2.5817e-02], + [ 8.6954e-02], + [ 7.6245e-02], + [ 7.2020e-02], + [ 2.3068e-04], + [-5.6384e-02], + [ 2.2550e-02], + [ 3.2843e-02], + [ 5.3094e-02], + [-1.8558e-02], + [ 2.1521e-02], + [ 1.9609e-02], + [-1.9353e-02], + [ 4.5179e-02], + [ 6.3009e-02], + [ 3.0736e-02], + [-2.2671e-02], + [ 3.2365e-02], + [-8.1356e-02], + [-4.4201e-02], + [-1.0509e-02], + [ 5.9616e-02], + [-1.9316e-02], + [ 1.9008e-02], + [-5.4945e-04], + [ 9.2719e-02], + [ 2.2428e-02], + [ 3.4043e-02], + [-7.1129e-02], + [-1.1980e-02], + [-1.4372e-02], + [-1.3479e-02], + [-3.5066e-02], + [ 8.3591e-03], + [ 3.7630e-02], + [-7.2617e-02], + [-1.0860e-02], + [-7.7352e-02], + [ 2.3487e-02], + [-1.1260e-02], + [-3.4782e-05], + [ 3.5266e-02]]], device='cuda:0'))]) +2025-07-07 09:44:50,685 - INFO - ******************** +2025-07-07 09:44:50,689 - INFO - Evaluation Results: +2025-07-07 10:16:34,317 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 10:16:34,320 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 10:16:34,689 - INFO - Using device: cuda:0 +2025-07-07 10:16:34,966 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 10:16:35,032 - INFO - ******************** +2025-07-07 10:18:22,912 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 10:18:22,917 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': 
'/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 10:18:23,303 - INFO - Using device: cuda:0 +2025-07-07 10:18:23,522 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 10:18:23,588 - INFO - ******************** +2025-07-07 10:18:23,599 - INFO - Evaluation Results: +2025-07-07 10:19:22,110 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 10:19:22,117 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 10:19:22,472 - INFO - Using device: cuda:0 +2025-07-07 10:19:22,654 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 10:19:22,721 - INFO - ******************** +2025-07-07 10:22:00,690 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 10:22:00,692 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, 
+ 'visualize': True} +2025-07-07 10:22:01,041 - INFO - Using device: cuda:0 +2025-07-07 10:22:01,220 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 10:22:01,286 - INFO - ******************** +2025-07-07 10:22:01,295 - INFO - 0: torch.Size([64]) +2025-07-07 10:22:01,295 - INFO - 1: torch.Size([64]) +2025-07-07 10:22:01,295 - INFO - 2: torch.Size([128]) +2025-07-07 10:22:01,295 - INFO - 3: torch.Size([128]) +2025-07-07 10:22:01,295 - INFO - 4: torch.Size([1024]) +2025-07-07 10:22:01,295 - INFO - 5: torch.Size([1024]) +2025-07-07 10:22:01,295 - INFO - 6: torch.Size([64, 6, 1, 1]) +2025-07-07 10:22:01,295 - INFO - 7: torch.Size([128, 64, 1, 1]) +2025-07-07 10:22:01,295 - INFO - 8: torch.Size([1024, 128, 1]) +2025-07-07 10:22:01,296 - INFO - 9: torch.Size([512, 1024]) +2025-07-07 10:22:01,296 - INFO - 10: torch.Size([512]) +2025-07-07 10:22:01,296 - INFO - 11: torch.Size([512]) +2025-07-07 10:22:01,296 - INFO - 12: torch.Size([256, 512]) +2025-07-07 10:22:01,296 - INFO - 13: torch.Size([256]) +2025-07-07 10:22:01,296 - INFO - 14: torch.Size([256]) +2025-07-07 10:22:01,296 - INFO - 15: torch.Size([9, 256]) +2025-07-07 10:22:01,296 - INFO - 16: torch.Size([9]) +2025-07-07 10:22:01,296 - INFO - 17: torch.Size([64]) +2025-07-07 10:22:01,296 - INFO - 18: torch.Size([64]) +2025-07-07 10:22:01,296 - INFO - 19: torch.Size([64]) +2025-07-07 10:22:01,296 - INFO - 20: torch.Size([64]) +2025-07-07 10:22:01,296 - INFO - 21: torch.Size([64]) +2025-07-07 10:22:01,296 - INFO - 22: torch.Size([64]) +2025-07-07 10:22:01,296 - INFO - 23: torch.Size([64]) +2025-07-07 10:22:01,296 - INFO - 24: torch.Size([64]) +2025-07-07 10:22:01,296 - INFO - 25: torch.Size([64]) +2025-07-07 10:22:01,296 - INFO - 26: torch.Size([64]) +2025-07-07 10:22:01,296 - INFO - 27: torch.Size([1024]) +2025-07-07 10:22:01,296 - INFO - 28: torch.Size([1024]) +2025-07-07 10:22:01,297 - INFO - 29: torch.Size([64]) +2025-07-07 10:22:01,297 - INFO - 30: torch.Size([64]) +2025-07-07 
10:22:01,297 - INFO - 31: torch.Size([256]) +2025-07-07 10:22:01,297 - INFO - 32: torch.Size([256]) +2025-07-07 10:22:01,297 - INFO - 33: torch.Size([256]) +2025-07-07 10:22:01,297 - INFO - 34: torch.Size([256]) +2025-07-07 10:22:01,297 - INFO - 35: torch.Size([128]) +2025-07-07 10:22:01,297 - INFO - 36: torch.Size([128]) +2025-07-07 10:22:01,297 - INFO - 37: torch.Size([64, 6, 1, 1]) +2025-07-07 10:22:01,297 - INFO - 38: torch.Size([64, 64, 1, 1]) +2025-07-07 10:22:01,297 - INFO - 39: torch.Size([64, 128, 1, 1]) +2025-07-07 10:22:01,297 - INFO - 40: torch.Size([64, 64, 1, 1]) +2025-07-07 10:22:01,299 - INFO - 41: torch.Size([64, 128, 1, 1]) +2025-07-07 10:22:01,299 - INFO - 42: torch.Size([1024, 192, 1]) +2025-07-07 10:22:01,299 - INFO - 43: torch.Size([64, 16, 1]) +2025-07-07 10:22:01,299 - INFO - 44: torch.Size([256, 1216, 1]) +2025-07-07 10:22:01,299 - INFO - 45: torch.Size([256, 256, 1]) +2025-07-07 10:22:01,299 - INFO - 46: torch.Size([128, 256, 1]) +2025-07-07 10:22:01,299 - INFO - 47: torch.Size([1, 128, 1]) +2025-07-07 10:39:19,035 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 10:39:19,037 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 10:39:19,407 - INFO - Using device: cuda:0 +2025-07-07 10:39:19,615 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 10:39:19,683 - INFO - ******************** +2025-07-07 10:58:29,161 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 10:58:29,162 - INFO - Arguments: +{ 'cache_dir': 
'/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 10:58:29,525 - INFO - Using device: cuda:0 +2025-07-07 10:58:29,702 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 10:58:29,764 - INFO - ******************** +2025-07-07 10:58:29,810 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 10:58:29,855 - INFO - ----------data.shpe: torch.Size([1, 3, 10000]) +2025-07-07 10:58:29,861 - INFO - data: tensor([[[ 0.2947, -0.9079, -0.0716, ..., 2.8604, 2.4947, -0.0463], + [-0.7686, 0.5615, 0.3937, ..., -0.8753, -0.8538, 0.3687], + [ 0.8813, 0.4124, 0.8802, ..., 0.2749, 0.9040, 0.8848]]]) +2025-07-07 10:58:29,861 - INFO - ----------targets.shpe: torch.Size([1, 10000]) +2025-07-07 10:58:29,861 - INFO - targets: tensor([[-28.0465, 214.2780, -17.6860, ..., -91.7610, -97.4212, -12.0203]]) +2025-07-07 11:12:47,149 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 11:12:47,152 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 11:12:47,542 - INFO - Using device: cuda:0 +2025-07-07 11:12:47,805 - INFO - Loading model 
form experiments/Train_Test/best_model_pth +2025-07-07 11:12:47,872 - INFO - ******************** +2025-07-07 11:12:47,887 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 11:15:04,086 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 11:15:04,088 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 11:15:04,456 - INFO - Using device: cuda:0 +2025-07-07 11:15:04,638 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 11:15:04,704 - INFO - ******************** +2025-07-07 11:15:04,725 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 11:15:05,096 - INFO - vtk_file: /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM/N_S_WWS_WM_292.vtk +2025-07-07 11:15:05,107 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-07 11:15:05,151 - INFO - vtk_file: /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM/N_S_WWS_WM_215.vtk +2025-07-07 11:15:05,162 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-07 11:15:05,201 - INFO - vtk_file: /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM/N_S_WWS_WM_073.vtk +2025-07-07 11:15:05,224 - INFO - Loading cached data from 
/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-07 11:15:05,301 - INFO - vtk_file: /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM/N_S_WWS_WM_323.vtk +2025-07-07 11:15:05,326 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-07 11:15:05,382 - INFO - vtk_file: /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM/N_S_WWS_WM_240.vtk +2025-07-07 11:15:05,382 - INFO - Evaluation Results: +2025-07-07 11:21:21,225 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 11:21:21,227 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 11:21:21,607 - INFO - Using device: cuda:0 +2025-07-07 11:21:21,788 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 11:21:21,855 - INFO - ******************** +2025-07-07 11:21:21,890 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 11:21:22,254 - INFO - data.shape: torch.Size([1, 3, 10000]) +2025-07-07 11:21:22,267 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-07 11:21:22,306 - INFO - data.shape: torch.Size([1, 3, 10000]) +2025-07-07 11:21:22,307 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-07 11:21:22,345 - INFO - data.shape: 
torch.Size([1, 3, 10000]) +2025-07-07 11:21:22,347 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-07 11:21:22,386 - INFO - data.shape: torch.Size([1, 3, 10000]) +2025-07-07 11:21:22,387 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-07 11:21:22,426 - INFO - data.shape: torch.Size([1, 3, 10000]) +2025-07-07 11:21:22,426 - INFO - Evaluation Results: +2025-07-07 11:45:10,029 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 11:45:10,030 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 11:45:10,403 - INFO - Using device: cuda:0 +2025-07-07 11:45:10,585 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 11:45:10,652 - INFO - ******************** +2025-07-07 11:45:10,670 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 11:45:11,132 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-07 11:47:58,442 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 11:47:58,443 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 
'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 11:47:58,821 - INFO - Using device: cuda:0 +2025-07-07 11:47:59,001 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 11:47:59,067 - INFO - ******************** +2025-07-07 11:47:59,084 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 11:47:59,440 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-07 11:47:59,440 - INFO - Sample: N_S_WWS_WM_292 +2025-07-07 11:47:59,440 - INFO - Max Error: 474.005371 +2025-07-07 11:47:59,440 - INFO - Mean Error: 19.724190 +2025-07-07 11:47:59,440 - INFO - Std Error: 27.830032 +2025-07-07 11:47:59,441 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-07 11:47:59,485 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-07 11:47:59,485 - INFO - Sample: N_S_WWS_WM_215 +2025-07-07 11:47:59,485 - INFO - Max Error: 475.202637 +2025-07-07 11:47:59,485 - INFO - Mean Error: 18.083265 +2025-07-07 11:47:59,485 - INFO - Std Error: 27.598852 +2025-07-07 11:47:59,486 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-07 11:47:59,528 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-07 11:47:59,528 - INFO - Sample: N_S_WWS_WM_073 +2025-07-07 11:47:59,528 - INFO - Max Error: 7346.312012 +2025-07-07 11:47:59,528 - INFO - Mean Error: 20.367575 +2025-07-07 11:47:59,528 - INFO - Std Error: 106.303505 +2025-07-07 11:47:59,529 - INFO - Loading cached data from 
/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-07 11:47:59,571 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-07 11:47:59,571 - INFO - Sample: N_S_WWS_WM_323 +2025-07-07 11:47:59,571 - INFO - Max Error: 558.258301 +2025-07-07 11:47:59,572 - INFO - Mean Error: 19.368584 +2025-07-07 11:47:59,572 - INFO - Std Error: 31.317421 +2025-07-07 11:47:59,573 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-07 11:47:59,616 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-07 11:47:59,616 - INFO - Sample: N_S_WWS_WM_240 +2025-07-07 11:47:59,616 - INFO - Max Error: 568.939941 +2025-07-07 11:47:59,616 - INFO - Mean Error: 20.642050 +2025-07-07 11:47:59,616 - INFO - Std Error: 28.538563 +2025-07-07 11:47:59,616 - INFO - Evaluation Results: +2025-07-07 11:58:27,765 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 11:58:27,769 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 11:58:28,140 - INFO - Using device: cuda:0 +2025-07-07 11:58:28,324 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 11:58:28,395 - INFO - ******************** +2025-07-07 11:58:28,415 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 11:58:28,868 - INFO - Saved raw prdiction data to 
results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-07 11:58:28,869 - INFO - Sample: N_S_WWS_WM_292 +2025-07-07 11:58:28,869 - INFO - Max Error: 474.005371 +2025-07-07 11:58:28,869 - INFO - Mean Error: 19.724190 +2025-07-07 11:58:28,869 - INFO - Std Error: 27.830032 +2025-07-07 11:58:28,870 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-07 11:58:28,917 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-07 11:58:28,918 - INFO - Sample: N_S_WWS_WM_215 +2025-07-07 11:58:28,918 - INFO - Max Error: 475.202637 +2025-07-07 11:58:28,918 - INFO - Mean Error: 18.083265 +2025-07-07 11:58:28,918 - INFO - Std Error: 27.598852 +2025-07-07 11:58:28,919 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-07 11:58:28,966 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-07 11:58:28,966 - INFO - Sample: N_S_WWS_WM_073 +2025-07-07 11:58:28,966 - INFO - Max Error: 7346.312012 +2025-07-07 11:58:28,966 - INFO - Mean Error: 20.367575 +2025-07-07 11:58:28,966 - INFO - Std Error: 106.303505 +2025-07-07 11:58:28,968 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-07 11:58:29,015 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-07 11:58:29,015 - INFO - Sample: N_S_WWS_WM_323 +2025-07-07 11:58:29,015 - INFO - Max Error: 558.258301 +2025-07-07 11:58:29,015 - INFO - Mean Error: 19.368584 +2025-07-07 11:58:29,015 - INFO - Std Error: 31.317421 +2025-07-07 11:58:29,016 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-07 11:58:29,060 - INFO - Saved raw prdiction data to 
results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-07 11:58:29,061 - INFO - Sample: N_S_WWS_WM_240 +2025-07-07 11:58:29,061 - INFO - Max Error: 568.939941 +2025-07-07 11:58:29,061 - INFO - Mean Error: 20.642050 +2025-07-07 11:58:29,061 - INFO - Std Error: 28.538563 +2025-07-07 11:58:29,068 - INFO - Evaluation complete, Results save to results/Train_Test +2025-07-07 11:58:29,068 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-07 11:58:29,069 - INFO - Evaluation Results: +2025-07-07 12:03:06,687 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 12:03:06,690 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 12:03:07,080 - INFO - Using device: cuda:0 +2025-07-07 12:03:07,276 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 12:03:07,346 - INFO - ******************** +2025-07-07 12:03:07,361 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 12:03:07,803 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-07 12:03:07,803 - INFO - Sample: N_S_WWS_WM_292 +2025-07-07 12:03:07,803 - INFO - Max Error: 474.005371 +2025-07-07 12:03:07,803 - INFO - Mean Error: 19.724190 +2025-07-07 12:03:07,803 - INFO - Std Error: 27.830032 +2025-07-07 12:03:07,804 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-07 
12:03:07,849 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-07 12:03:07,849 - INFO - Sample: N_S_WWS_WM_215 +2025-07-07 12:03:07,849 - INFO - Max Error: 475.202637 +2025-07-07 12:03:07,849 - INFO - Mean Error: 18.083265 +2025-07-07 12:03:07,849 - INFO - Std Error: 27.598852 +2025-07-07 12:03:07,850 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-07 12:03:07,895 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-07 12:03:07,895 - INFO - Sample: N_S_WWS_WM_073 +2025-07-07 12:03:07,895 - INFO - Max Error: 7346.312012 +2025-07-07 12:03:07,895 - INFO - Mean Error: 20.367575 +2025-07-07 12:03:07,895 - INFO - Std Error: 106.303505 +2025-07-07 12:03:07,896 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-07 12:03:07,941 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-07 12:03:07,941 - INFO - Sample: N_S_WWS_WM_323 +2025-07-07 12:03:07,941 - INFO - Max Error: 558.258301 +2025-07-07 12:03:07,941 - INFO - Mean Error: 19.368584 +2025-07-07 12:03:07,941 - INFO - Std Error: 31.317421 +2025-07-07 12:03:07,942 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-07 12:03:07,987 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-07 12:03:07,987 - INFO - Sample: N_S_WWS_WM_240 +2025-07-07 12:03:07,987 - INFO - Max Error: 568.939941 +2025-07-07 12:03:07,987 - INFO - Mean Error: 20.642050 +2025-07-07 12:03:07,987 - INFO - Std Error: 28.538563 +2025-07-07 12:03:07,988 - INFO - all_metrics[0].keys(): dict_keys(['MSE', 'MAE', 'RMSE', 'Max_Error', 'Rel_L2', 'Rel_L1']) +2025-07-07 12:03:08,013 - INFO - 
Evaluation complete, Results save to results/Train_Test +2025-07-07 12:03:08,013 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-07 12:03:08,013 - INFO - Evaluation Results: +2025-07-07 14:35:48,804 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 14:35:48,804 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 14:35:49,060 - INFO - Using device: cuda:0 +2025-07-07 14:35:49,228 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 14:35:49,352 - INFO - ******************** +2025-07-07 14:35:49,362 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 14:35:49,835 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-07 14:35:49,835 - INFO - Sample: N_S_WWS_WM_292 +2025-07-07 14:35:49,835 - INFO - Max Error: 474.005371 +2025-07-07 14:35:49,835 - INFO - Mean Error: 19.724190 +2025-07-07 14:35:49,835 - INFO - Std Error: 27.830032 +2025-07-07 14:35:49,836 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-07 14:35:49,882 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-07 14:35:49,882 - INFO - Sample: N_S_WWS_WM_215 +2025-07-07 14:35:49,882 - INFO - Max Error: 475.202637 +2025-07-07 14:35:49,882 - INFO - Mean Error: 18.083265 +2025-07-07 14:35:49,883 - INFO - Std Error: 
27.598852 +2025-07-07 14:35:49,883 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-07 14:35:49,925 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-07 14:35:49,925 - INFO - Sample: N_S_WWS_WM_073 +2025-07-07 14:35:49,926 - INFO - Max Error: 7346.312012 +2025-07-07 14:35:49,926 - INFO - Mean Error: 20.367575 +2025-07-07 14:35:49,926 - INFO - Std Error: 106.303505 +2025-07-07 14:35:49,926 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-07 14:35:49,966 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-07 14:35:49,966 - INFO - Sample: N_S_WWS_WM_323 +2025-07-07 14:35:49,966 - INFO - Max Error: 558.258301 +2025-07-07 14:35:49,966 - INFO - Mean Error: 19.368584 +2025-07-07 14:35:49,966 - INFO - Std Error: 31.317421 +2025-07-07 14:35:49,966 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-07 14:35:50,009 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-07 14:35:50,009 - INFO - Sample: N_S_WWS_WM_240 +2025-07-07 14:35:50,009 - INFO - Max Error: 568.939941 +2025-07-07 14:35:50,009 - INFO - Mean Error: 20.642050 +2025-07-07 14:35:50,009 - INFO - Std Error: 28.538563 +2025-07-07 14:35:50,009 - INFO - all_metrics: [{'MSE': np.float32(0.08463714), 'MAE': np.float32(0.1682234), 'RMSE': np.float32(0.29092464), 'Max_Error': np.float32(4.0426903), 'Rel_L2': np.float32(2.314728), 'Rel_L1': np.float32(2.314728)}, {'MSE': np.float32(0.07919231), 'MAE': np.float32(0.1542283), 'RMSE': np.float32(0.2814113), 'Max_Error': np.float32(4.0529013), 'Rel_L2': np.float32(2.2958546), 'Rel_L1': np.float32(2.2958546)}, {'MSE': np.float32(0.852171), 'MAE': np.float32(0.17371066), 
'RMSE': np.float32(0.9231311), 'Max_Error': np.float32(62.655117), 'Rel_L2': np.float32(1.8907375), 'Rel_L1': np.float32(1.8907375)}, {'MSE': np.float32(0.09863006), 'MAE': np.float32(0.16519047), 'RMSE': np.float32(0.31405425), 'Max_Error': np.float32(4.7612653), 'Rel_L2': np.float32(1.4931684), 'Rel_L1': np.float32(1.4931684)}, {'MSE': np.float32(0.09023737), 'MAE': np.float32(0.17605162), 'RMSE': np.float32(0.30039537), 'Max_Error': np.float32(4.852367), 'Rel_L2': np.float32(1.6304951), 'Rel_L1': np.float32(1.6304951)}] +2025-07-07 14:37:07,988 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 14:37:07,990 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 14:37:08,234 - INFO - Using device: cuda:0 +2025-07-07 14:37:08,400 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 14:37:08,514 - INFO - ******************** +2025-07-07 14:37:08,525 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 14:37:08,931 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-07 14:37:08,932 - INFO - Sample: N_S_WWS_WM_292 +2025-07-07 14:37:08,932 - INFO - Max Error: 474.005371 +2025-07-07 14:37:08,932 - INFO - Mean Error: 19.724190 +2025-07-07 14:37:08,932 - INFO - Std Error: 27.830032 +2025-07-07 14:37:08,932 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-07 14:37:08,975 - 
INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-07 14:37:08,975 - INFO - Sample: N_S_WWS_WM_215 +2025-07-07 14:37:08,975 - INFO - Max Error: 475.202637 +2025-07-07 14:37:08,976 - INFO - Mean Error: 18.083265 +2025-07-07 14:37:08,976 - INFO - Std Error: 27.598852 +2025-07-07 14:37:08,976 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-07 14:37:09,019 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-07 14:37:09,019 - INFO - Sample: N_S_WWS_WM_073 +2025-07-07 14:37:09,019 - INFO - Max Error: 7346.312012 +2025-07-07 14:37:09,019 - INFO - Mean Error: 20.367575 +2025-07-07 14:37:09,019 - INFO - Std Error: 106.303505 +2025-07-07 14:37:09,020 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-07 14:37:09,065 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-07 14:37:09,065 - INFO - Sample: N_S_WWS_WM_323 +2025-07-07 14:37:09,066 - INFO - Max Error: 558.258301 +2025-07-07 14:37:09,066 - INFO - Mean Error: 19.368584 +2025-07-07 14:37:09,066 - INFO - Std Error: 31.317421 +2025-07-07 14:37:09,066 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-07 14:37:09,108 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-07 14:37:09,108 - INFO - Sample: N_S_WWS_WM_240 +2025-07-07 14:37:09,108 - INFO - Max Error: 568.939941 +2025-07-07 14:37:09,108 - INFO - Mean Error: 20.642050 +2025-07-07 14:37:09,108 - INFO - Std Error: 28.538563 +2025-07-07 14:37:09,108 - INFO - all_metrics: [{'MSE': np.float32(0.08463714), 'MAE': np.float32(0.1682234), 'RMSE': np.float32(0.29092464), 'Max_Error': np.float32(4.0426903), 
'Rel_L2': np.float32(2.314728), 'Rel_L1': np.float32(2.314728)}, {'MSE': np.float32(0.07919231), 'MAE': np.float32(0.1542283), 'RMSE': np.float32(0.2814113), 'Max_Error': np.float32(4.0529013), 'Rel_L2': np.float32(2.2958546), 'Rel_L1': np.float32(2.2958546)}, {'MSE': np.float32(0.852171), 'MAE': np.float32(0.17371066), 'RMSE': np.float32(0.9231311), 'Max_Error': np.float32(62.655117), 'Rel_L2': np.float32(1.8907375), 'Rel_L1': np.float32(1.8907375)}, {'MSE': np.float32(0.09863006), 'MAE': np.float32(0.16519047), 'RMSE': np.float32(0.31405425), 'Max_Error': np.float32(4.7612653), 'Rel_L2': np.float32(1.4931684), 'Rel_L1': np.float32(1.4931684)}, {'MSE': np.float32(0.09023737), 'MAE': np.float32(0.17605162), 'RMSE': np.float32(0.30039537), 'Max_Error': np.float32(4.852367), 'Rel_L2': np.float32(1.6304951), 'Rel_L1': np.float32(1.6304951)}] +2025-07-07 14:37:09,109 - INFO - all_metrics[0].keys(): dict_keys(['MSE', 'MAE', 'RMSE', 'Max_Error', 'Rel_L2', 'Rel_L1']) +2025-07-07 14:37:09,109 - INFO - all_metrics[0].items(): dict_items([('MSE', np.float32(0.08463714)), ('MAE', np.float32(0.1682234)), ('RMSE', np.float32(0.29092464)), ('Max_Error', np.float32(4.0426903)), ('Rel_L2', np.float32(2.314728)), ('Rel_L1', np.float32(2.314728))]) +2025-07-07 14:37:09,109 - INFO - MSE: 0.08463714271783829 +2025-07-07 14:37:09,109 - INFO - MAE: 0.16822339594364166 +2025-07-07 14:37:09,109 - INFO - RMSE: 0.29092463850975037 +2025-07-07 14:37:09,109 - INFO - Max_Error: 4.042690277099609 +2025-07-07 14:37:09,109 - INFO - Rel_L2: 2.314728021621704 +2025-07-07 14:37:09,109 - INFO - Rel_L1: 2.314728021621704 +2025-07-07 14:37:09,112 - INFO - Evaluation complete, Results save to results/Train_Test +2025-07-07 14:37:09,112 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-07 14:37:09,112 - INFO - Evaluation Results: +2025-07-07 14:45:36,442 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 14:45:36,446 - INFO - Arguments: 
+{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 14:45:36,785 - INFO - Using device: cuda:0 +2025-07-07 14:45:37,013 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 14:45:37,034 - INFO - ******************** +2025-07-07 14:45:37,058 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 14:45:37,541 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-07 14:45:37,541 - INFO - Sample: N_S_WWS_WM_292 +2025-07-07 14:45:37,541 - INFO - Max Error: 474.005371 +2025-07-07 14:45:37,541 - INFO - Mean Error: 19.724190 +2025-07-07 14:45:37,541 - INFO - Std Error: 27.830032 +2025-07-07 14:45:37,542 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-07 14:45:37,593 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-07 14:45:37,593 - INFO - Sample: N_S_WWS_WM_215 +2025-07-07 14:45:37,593 - INFO - Max Error: 475.202637 +2025-07-07 14:45:37,593 - INFO - Mean Error: 18.083265 +2025-07-07 14:45:37,593 - INFO - Std Error: 27.598852 +2025-07-07 14:45:37,594 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-07 14:45:37,644 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-07 14:45:37,644 - INFO - Sample: N_S_WWS_WM_073 +2025-07-07 
14:45:37,644 - INFO - Max Error: 7346.312012 +2025-07-07 14:45:37,644 - INFO - Mean Error: 20.367575 +2025-07-07 14:45:37,645 - INFO - Std Error: 106.303505 +2025-07-07 14:45:37,646 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-07 14:45:37,692 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-07 14:45:37,693 - INFO - Sample: N_S_WWS_WM_323 +2025-07-07 14:45:37,693 - INFO - Max Error: 558.258301 +2025-07-07 14:45:37,693 - INFO - Mean Error: 19.368584 +2025-07-07 14:45:37,693 - INFO - Std Error: 31.317421 +2025-07-07 14:45:37,693 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-07 14:45:37,741 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-07 14:45:37,742 - INFO - Sample: N_S_WWS_WM_240 +2025-07-07 14:45:37,742 - INFO - Max Error: 568.939941 +2025-07-07 14:45:37,742 - INFO - Mean Error: 20.642050 +2025-07-07 14:45:37,742 - INFO - Std Error: 28.538563 +2025-07-07 14:45:37,742 - INFO - all_metrics: [{'MSE': np.float32(0.08463714), 'MAE': np.float32(0.1682234), 'RMSE': np.float32(0.29092464), 'Max_Error': np.float32(4.0426903), 'Rel_L2': np.float32(2.314728), 'Rel_L1': np.float32(2.314728)}, {'MSE': np.float32(0.07919231), 'MAE': np.float32(0.1542283), 'RMSE': np.float32(0.2814113), 'Max_Error': np.float32(4.0529013), 'Rel_L2': np.float32(2.2958546), 'Rel_L1': np.float32(2.2958546)}, {'MSE': np.float32(0.852171), 'MAE': np.float32(0.17371066), 'RMSE': np.float32(0.9231311), 'Max_Error': np.float32(62.655117), 'Rel_L2': np.float32(1.8907375), 'Rel_L1': np.float32(1.8907375)}, {'MSE': np.float32(0.09863006), 'MAE': np.float32(0.16519047), 'RMSE': np.float32(0.31405425), 'Max_Error': np.float32(4.7612653), 'Rel_L2': np.float32(1.4931684), 'Rel_L1': np.float32(1.4931684)}, {'MSE': 
np.float32(0.09023737), 'MAE': np.float32(0.17605162), 'RMSE': np.float32(0.30039537), 'Max_Error': np.float32(4.852367), 'Rel_L2': np.float32(1.6304951), 'Rel_L1': np.float32(1.6304951)}] +2025-07-07 14:45:37,742 - INFO - all_metrics[0].keys(): dict_keys(['MSE', 'MAE', 'RMSE', 'Max_Error', 'Rel_L2', 'Rel_L1']) +2025-07-07 14:45:37,742 - INFO - all_metrics[0].items(): dict_items([('MSE', np.float32(0.08463714)), ('MAE', np.float32(0.1682234)), ('RMSE', np.float32(0.29092464)), ('Max_Error', np.float32(4.0426903)), ('Rel_L2', np.float32(2.314728)), ('Rel_L1', np.float32(2.314728))]) +2025-07-07 14:45:37,742 - INFO - MSE: 0.08463714271783829 +2025-07-07 14:45:37,742 - INFO - MAE: 0.16822339594364166 +2025-07-07 14:45:37,742 - INFO - RMSE: 0.29092463850975037 +2025-07-07 14:45:37,742 - INFO - Max_Error: 4.042690277099609 +2025-07-07 14:45:37,742 - INFO - Rel_L2: 2.314728021621704 +2025-07-07 14:45:37,742 - INFO - Rel_L1: 2.314728021621704 +2025-07-07 14:45:37,751 - INFO - Evaluation complete, Results save to results/Train_Test +2025-07-07 14:45:37,751 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-07 14:45:37,751 - INFO - Evaluation Results: +2025-07-07 14:47:16,513 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 14:47:16,514 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 14:47:16,859 - INFO - Using device: cuda:0 +2025-07-07 14:47:17,082 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 14:47:17,097 - INFO - ******************** 
+2025-07-07 14:47:17,111 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 14:47:17,454 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-07 14:47:17,455 - INFO - Sample: N_S_WWS_WM_292 +2025-07-07 14:47:17,455 - INFO - Max Error: 474.005371 +2025-07-07 14:47:17,455 - INFO - Mean Error: 19.724190 +2025-07-07 14:47:17,455 - INFO - Std Error: 27.830032 +2025-07-07 14:47:17,456 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-07 14:47:17,502 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-07 14:47:17,502 - INFO - Sample: N_S_WWS_WM_215 +2025-07-07 14:47:17,502 - INFO - Max Error: 475.202637 +2025-07-07 14:47:17,502 - INFO - Mean Error: 18.083265 +2025-07-07 14:47:17,502 - INFO - Std Error: 27.598852 +2025-07-07 14:47:17,503 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-07 14:47:17,547 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-07 14:47:17,547 - INFO - Sample: N_S_WWS_WM_073 +2025-07-07 14:47:17,547 - INFO - Max Error: 7346.312012 +2025-07-07 14:47:17,547 - INFO - Mean Error: 20.367575 +2025-07-07 14:47:17,547 - INFO - Std Error: 106.303505 +2025-07-07 14:47:17,548 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-07 14:47:17,592 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-07 14:47:17,592 - INFO - Sample: N_S_WWS_WM_323 +2025-07-07 14:47:17,593 - INFO - Max Error: 558.258301 +2025-07-07 14:47:17,593 - INFO - Mean Error: 19.368584 +2025-07-07 14:47:17,593 - INFO - Std Error: 31.317421 +2025-07-07 
14:47:17,594 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-07 14:47:17,636 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-07 14:47:17,636 - INFO - Sample: N_S_WWS_WM_240 +2025-07-07 14:47:17,637 - INFO - Max Error: 568.939941 +2025-07-07 14:47:17,637 - INFO - Mean Error: 20.642050 +2025-07-07 14:47:17,637 - INFO - Std Error: 28.538563 +2025-07-07 14:47:17,637 - INFO - all_metrics: [{'MSE': np.float32(0.08463714), 'MAE': np.float32(0.1682234), 'RMSE': np.float32(0.29092464), 'Max_Error': np.float32(4.0426903), 'Rel_L2': np.float32(2.314728), 'Rel_L1': np.float32(2.314728)}, {'MSE': np.float32(0.07919231), 'MAE': np.float32(0.1542283), 'RMSE': np.float32(0.2814113), 'Max_Error': np.float32(4.0529013), 'Rel_L2': np.float32(2.2958546), 'Rel_L1': np.float32(2.2958546)}, {'MSE': np.float32(0.852171), 'MAE': np.float32(0.17371066), 'RMSE': np.float32(0.9231311), 'Max_Error': np.float32(62.655117), 'Rel_L2': np.float32(1.8907375), 'Rel_L1': np.float32(1.8907375)}, {'MSE': np.float32(0.09863006), 'MAE': np.float32(0.16519047), 'RMSE': np.float32(0.31405425), 'Max_Error': np.float32(4.7612653), 'Rel_L2': np.float32(1.4931684), 'Rel_L1': np.float32(1.4931684)}, {'MSE': np.float32(0.09023737), 'MAE': np.float32(0.17605162), 'RMSE': np.float32(0.30039537), 'Max_Error': np.float32(4.852367), 'Rel_L2': np.float32(1.6304951), 'Rel_L1': np.float32(1.6304951)}] +2025-07-07 14:47:17,637 - INFO - all_metrics[0]: {'MSE': np.float32(0.08463714), 'MAE': np.float32(0.1682234), 'RMSE': np.float32(0.29092464), 'Max_Error': np.float32(4.0426903), 'Rel_L2': np.float32(2.314728), 'Rel_L1': np.float32(2.314728)} +2025-07-07 14:47:17,637 - INFO - all_metrics[0].keys(): dict_keys(['MSE', 'MAE', 'RMSE', 'Max_Error', 'Rel_L2', 'Rel_L1']) +2025-07-07 14:47:17,637 - INFO - all_metrics[0].items(): dict_items([('MSE', np.float32(0.08463714)), ('MAE', 
np.float32(0.1682234)), ('RMSE', np.float32(0.29092464)), ('Max_Error', np.float32(4.0426903)), ('Rel_L2', np.float32(2.314728)), ('Rel_L1', np.float32(2.314728))]) +2025-07-07 14:47:17,637 - INFO - MSE: 0.08463714271783829 +2025-07-07 14:47:17,637 - INFO - MAE: 0.16822339594364166 +2025-07-07 14:47:17,637 - INFO - RMSE: 0.29092463850975037 +2025-07-07 14:47:17,637 - INFO - Max_Error: 4.042690277099609 +2025-07-07 14:47:17,637 - INFO - Rel_L2: 2.314728021621704 +2025-07-07 14:47:17,637 - INFO - Rel_L1: 2.314728021621704 +2025-07-07 14:47:17,641 - INFO - Evaluation complete, Results save to results/Train_Test +2025-07-07 14:47:17,641 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-07 14:47:17,641 - INFO - Evaluation Results: +2025-07-07 14:53:04,986 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 14:53:04,993 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 14:53:05,355 - INFO - Using device: cuda:0 +2025-07-07 14:53:05,595 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 14:53:05,614 - INFO - ******************** +2025-07-07 14:53:05,634 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 14:53:06,100 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-07 14:53:06,105 - INFO - Sample: N_S_WWS_WM_292 +2025-07-07 14:53:06,105 - INFO - Max Error: 474.005371 +2025-07-07 14:53:06,105 - INFO - Mean 
Error: 19.724190 +2025-07-07 14:53:06,105 - INFO - Std Error: 27.830032 +2025-07-07 14:53:06,106 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-07 14:53:06,153 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-07 14:53:06,153 - INFO - Sample: N_S_WWS_WM_215 +2025-07-07 14:53:06,153 - INFO - Max Error: 475.202637 +2025-07-07 14:53:06,153 - INFO - Mean Error: 18.083265 +2025-07-07 14:53:06,153 - INFO - Std Error: 27.598852 +2025-07-07 14:53:06,154 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-07 14:53:06,200 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-07 14:53:06,200 - INFO - Sample: N_S_WWS_WM_073 +2025-07-07 14:53:06,200 - INFO - Max Error: 7346.312012 +2025-07-07 14:53:06,200 - INFO - Mean Error: 20.367575 +2025-07-07 14:53:06,200 - INFO - Std Error: 106.303505 +2025-07-07 14:53:06,201 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-07 14:53:06,247 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-07 14:53:06,248 - INFO - Sample: N_S_WWS_WM_323 +2025-07-07 14:53:06,248 - INFO - Max Error: 558.258301 +2025-07-07 14:53:06,248 - INFO - Mean Error: 19.368584 +2025-07-07 14:53:06,248 - INFO - Std Error: 31.317421 +2025-07-07 14:53:06,249 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-07 14:53:06,296 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-07 14:53:06,297 - INFO - Sample: N_S_WWS_WM_240 +2025-07-07 14:53:06,297 - INFO - Max Error: 568.939941 +2025-07-07 14:53:06,297 - INFO - Mean Error: 
20.642050 +2025-07-07 14:53:06,297 - INFO - Std Error: 28.538563 +2025-07-07 14:53:06,297 - INFO - all_metrics: [{'MSE': np.float32(0.08463714), 'MAE': np.float32(0.1682234), 'RMSE': np.float32(0.29092464), 'Max_Error': np.float32(4.0426903), 'Rel_L2': np.float32(2.314728), 'Rel_L1': np.float32(2.314728)}, {'MSE': np.float32(0.07919231), 'MAE': np.float32(0.1542283), 'RMSE': np.float32(0.2814113), 'Max_Error': np.float32(4.0529013), 'Rel_L2': np.float32(2.2958546), 'Rel_L1': np.float32(2.2958546)}, {'MSE': np.float32(0.852171), 'MAE': np.float32(0.17371066), 'RMSE': np.float32(0.9231311), 'Max_Error': np.float32(62.655117), 'Rel_L2': np.float32(1.8907375), 'Rel_L1': np.float32(1.8907375)}, {'MSE': np.float32(0.09863006), 'MAE': np.float32(0.16519047), 'RMSE': np.float32(0.31405425), 'Max_Error': np.float32(4.7612653), 'Rel_L2': np.float32(1.4931684), 'Rel_L1': np.float32(1.4931684)}, {'MSE': np.float32(0.09023737), 'MAE': np.float32(0.17605162), 'RMSE': np.float32(0.30039537), 'Max_Error': np.float32(4.852367), 'Rel_L2': np.float32(1.6304951), 'Rel_L1': np.float32(1.6304951)}] +2025-07-07 14:53:06,297 - INFO - all_metrics[0]: {'MSE': np.float32(0.08463714), 'MAE': np.float32(0.1682234), 'RMSE': np.float32(0.29092464), 'Max_Error': np.float32(4.0426903), 'Rel_L2': np.float32(2.314728), 'Rel_L1': np.float32(2.314728)} +2025-07-07 14:53:06,298 - INFO - all_metrics[0].keys(): dict_keys(['MSE', 'MAE', 'RMSE', 'Max_Error', 'Rel_L2', 'Rel_L1']) +2025-07-07 14:53:06,298 - INFO - all_metrics[0].items(): dict_items([('MSE', np.float32(0.08463714)), ('MAE', np.float32(0.1682234)), ('RMSE', np.float32(0.29092464)), ('Max_Error', np.float32(4.0426903)), ('Rel_L2', np.float32(2.314728)), ('Rel_L1', np.float32(2.314728))]) +2025-07-07 14:53:06,298 - INFO - MSE: 0.08463714271783829 +2025-07-07 14:53:06,298 - INFO - MAE: 0.16822339594364166 +2025-07-07 14:53:06,298 - INFO - RMSE: 0.29092463850975037 +2025-07-07 14:53:06,298 - INFO - Max_Error: 4.042690277099609 +2025-07-07 
14:53:06,298 - INFO - Rel_L2: 2.314728021621704 +2025-07-07 14:53:06,298 - INFO - Rel_L1: 2.314728021621704 +2025-07-07 14:53:06,302 - INFO - Evaluation complete, Results save to results/Train_Test +2025-07-07 14:53:06,303 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-07 14:53:06,303 - INFO - Evaluation Results: +2025-07-07 15:11:19,480 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 15:11:19,487 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 15:11:19,903 - INFO - Using device: cuda:0 +2025-07-07 15:11:20,135 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 15:11:20,155 - INFO - ******************** +2025-07-07 15:11:20,174 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 15:11:20,633 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-07 15:11:20,638 - INFO - Sample: N_S_WWS_WM_292 +2025-07-07 15:11:20,638 - INFO - Max Error: 474.005371 +2025-07-07 15:11:20,638 - INFO - Mean Error: 19.724190 +2025-07-07 15:11:20,638 - INFO - Std Error: 27.830032 +2025-07-07 15:11:20,639 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-07 15:11:20,712 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-07 15:11:20,712 - INFO - Sample: N_S_WWS_WM_215 +2025-07-07 
15:11:20,712 - INFO - Max Error: 475.202637 +2025-07-07 15:11:20,712 - INFO - Mean Error: 18.083265 +2025-07-07 15:11:20,712 - INFO - Std Error: 27.598852 +2025-07-07 15:11:20,713 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-07 15:11:20,760 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-07 15:11:20,760 - INFO - Sample: N_S_WWS_WM_073 +2025-07-07 15:11:20,761 - INFO - Max Error: 7346.312012 +2025-07-07 15:11:20,761 - INFO - Mean Error: 20.367575 +2025-07-07 15:11:20,761 - INFO - Std Error: 106.303505 +2025-07-07 15:11:20,762 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-07 15:11:20,808 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-07 15:11:20,808 - INFO - Sample: N_S_WWS_WM_323 +2025-07-07 15:11:20,808 - INFO - Max Error: 558.258301 +2025-07-07 15:11:20,808 - INFO - Mean Error: 19.368584 +2025-07-07 15:11:20,808 - INFO - Std Error: 31.317421 +2025-07-07 15:11:20,809 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-07 15:11:20,857 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-07 15:11:20,858 - INFO - Sample: N_S_WWS_WM_240 +2025-07-07 15:11:20,858 - INFO - Max Error: 568.939941 +2025-07-07 15:11:20,858 - INFO - Mean Error: 20.642050 +2025-07-07 15:11:20,858 - INFO - Std Error: 28.538563 +2025-07-07 15:11:20,858 - INFO - all_metrics[0].items(): dict_items([('MSE', np.float32(0.08463714)), ('MAE', np.float32(0.1682234)), ('RMSE', np.float32(0.29092464)), ('Max_Error', np.float32(4.0426903)), ('Rel_L2', np.float32(2.314728)), ('Rel_L1', np.float32(2.314728))]) +2025-07-07 15:11:20,859 - INFO - all_metrics[0].items(): dict_items([('MSE', 
np.float32(0.08463714)), ('MAE', np.float32(0.1682234)), ('RMSE', np.float32(0.29092464)), ('Max_Error', np.float32(4.0426903)), ('Rel_L2', np.float32(2.314728)), ('Rel_L1', np.float32(2.314728))]) +2025-07-07 15:11:20,859 - INFO - MSE_mean: 0.2409735918045044  +2025-07-07 15:11:20,859 - INFO - MSE_std: 0.30566635727882385  +2025-07-07 15:11:20,859 - INFO - MAE_mean: 0.16748088598251343  +2025-07-07 15:11:20,859 - INFO - MAE_std: 0.0076641482301056385  +2025-07-07 15:11:20,859 - INFO - RMSE_mean: 0.4219833314418793  +2025-07-07 15:11:20,859 - INFO - RMSE_std: 0.2508060038089752  +2025-07-07 15:11:20,859 - INFO - Max_Error_mean: 16.07286834716797  +2025-07-07 15:11:20,859 - INFO - Max_Error_std: 23.293617248535156  +2025-07-07 15:11:20,859 - INFO - Rel_L2_mean: 1.9249966144561768  +2025-07-07 15:11:20,859 - INFO - Rel_L2_std: 0.33579954504966736  +2025-07-07 15:11:20,859 - INFO - Rel_L1_mean: 1.9249966144561768  +2025-07-07 15:11:20,859 - INFO - Rel_L1_std: 0.33579954504966736  +2025-07-07 15:11:20,863 - INFO - Evaluation complete, Results save to results/Train_Test +2025-07-07 15:11:20,863 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-07 15:11:20,863 - INFO - Evaluation Results: +2025-07-07 15:21:58,682 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 15:21:58,686 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 15:21:59,145 - INFO - Using device: cuda:0 +2025-07-07 15:21:59,365 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 
15:21:59,392 - INFO - ******************** +2025-07-07 15:21:59,404 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 15:21:59,854 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-07 15:21:59,855 - INFO - Sample: N_S_WWS_WM_292 +2025-07-07 15:21:59,855 - INFO - Max Error: 474.005371 +2025-07-07 15:21:59,855 - INFO - Mean Error: 19.724190 +2025-07-07 15:21:59,855 - INFO - Std Error: 27.830032 +2025-07-07 15:21:59,856 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-07 15:21:59,900 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-07 15:21:59,900 - INFO - Sample: N_S_WWS_WM_215 +2025-07-07 15:21:59,900 - INFO - Max Error: 475.202637 +2025-07-07 15:21:59,900 - INFO - Mean Error: 18.083265 +2025-07-07 15:21:59,900 - INFO - Std Error: 27.598852 +2025-07-07 15:21:59,900 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-07 15:21:59,945 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-07 15:21:59,945 - INFO - Sample: N_S_WWS_WM_073 +2025-07-07 15:21:59,947 - INFO - Max Error: 7346.312012 +2025-07-07 15:21:59,947 - INFO - Mean Error: 20.367575 +2025-07-07 15:21:59,947 - INFO - Std Error: 106.303505 +2025-07-07 15:21:59,948 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-07 15:21:59,990 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-07 15:21:59,991 - INFO - Sample: N_S_WWS_WM_323 +2025-07-07 15:21:59,991 - INFO - Max Error: 558.258301 +2025-07-07 15:21:59,991 - INFO - Mean Error: 19.368584 +2025-07-07 15:21:59,991 
- INFO - Std Error: 31.317421 +2025-07-07 15:21:59,991 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-07 15:22:00,033 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-07 15:22:00,033 - INFO - Sample: N_S_WWS_WM_240 +2025-07-07 15:22:00,033 - INFO - Max Error: 568.939941 +2025-07-07 15:22:00,033 - INFO - Mean Error: 20.642050 +2025-07-07 15:22:00,033 - INFO - Std Error: 28.538563 +2025-07-07 15:22:00,039 - INFO - Evaluation complete, Results save to results/Train_Test +2025-07-07 15:22:00,039 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-07 15:22:00,039 - INFO - Evaluation Results: +2025-07-07 15:23:34,204 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-07 15:23:34,210 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-07 15:23:34,560 - INFO - Using device: cuda:0 +2025-07-07 15:23:34,777 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-07 15:23:34,796 - INFO - ******************** +2025-07-07 15:23:34,809 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 15:23:35,120 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-07 15:23:35,120 - INFO - Sample: N_S_WWS_WM_292 +2025-07-07 15:23:35,120 - INFO - Max Error: 474.005371 +2025-07-07 15:23:35,120 - INFO - Mean 
Error: 19.724190 +2025-07-07 15:23:35,120 - INFO - Std Error: 27.830032 +2025-07-07 15:23:35,120 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-07 15:23:35,162 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-07 15:23:35,162 - INFO - Sample: N_S_WWS_WM_215 +2025-07-07 15:23:35,162 - INFO - Max Error: 475.202637 +2025-07-07 15:23:35,162 - INFO - Mean Error: 18.083265 +2025-07-07 15:23:35,162 - INFO - Std Error: 27.598852 +2025-07-07 15:23:35,162 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-07 15:23:35,206 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-07 15:23:35,206 - INFO - Sample: N_S_WWS_WM_073 +2025-07-07 15:23:35,206 - INFO - Max Error: 7346.312012 +2025-07-07 15:23:35,206 - INFO - Mean Error: 20.367575 +2025-07-07 15:23:35,206 - INFO - Std Error: 106.303505 +2025-07-07 15:23:35,207 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-07 15:23:35,249 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-07 15:23:35,249 - INFO - Sample: N_S_WWS_WM_323 +2025-07-07 15:23:35,249 - INFO - Max Error: 558.258301 +2025-07-07 15:23:35,249 - INFO - Mean Error: 19.368584 +2025-07-07 15:23:35,250 - INFO - Std Error: 31.317421 +2025-07-07 15:23:35,251 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-07 15:23:35,295 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-07 15:23:35,295 - INFO - Sample: N_S_WWS_WM_240 +2025-07-07 15:23:35,295 - INFO - Max Error: 568.939941 +2025-07-07 15:23:35,295 - INFO - Mean Error: 
20.642050 +2025-07-07 15:23:35,295 - INFO - Std Error: 28.538563 +2025-07-07 15:23:35,319 - INFO - Evaluation complete, Results save to results/Train_Test +2025-07-07 15:23:35,319 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-07 15:23:35,319 - INFO - Evaluation Results: +2025-07-07 15:23:35,319 - INFO - MSE_mean: 0.240974 +2025-07-07 15:23:35,319 - INFO - MSE_std: 0.305666 +2025-07-07 15:23:35,319 - INFO - MAE_mean: 0.167481 +2025-07-07 15:23:35,319 - INFO - MAE_std: 0.007664 +2025-07-07 15:23:35,319 - INFO - RMSE_mean: 0.421983 +2025-07-07 15:23:35,319 - INFO - RMSE_std: 0.250806 +2025-07-07 15:23:35,319 - INFO - Max_Error_mean: 16.072868 +2025-07-07 15:23:35,319 - INFO - Max_Error_std: 23.293617 +2025-07-07 15:23:35,319 - INFO - Rel_L2_mean: 1.924997 +2025-07-07 15:23:35,319 - INFO - Rel_L2_std: 0.335800 +2025-07-07 15:23:35,319 - INFO - Rel_L1_mean: 1.924997 +2025-07-07 15:23:35,319 - INFO - Rel_L1_std: 0.335800 +2025-07-08 08:15:35,813 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-08 08:15:35,819 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-08 08:15:36,223 - INFO - Using device: cuda:0 +2025-07-08 08:15:36,483 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-08 08:15:36,514 - INFO - ******************** +2025-07-08 08:15:36,631 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-08 08:16:26,748 - INFO - **************************** Starting evaluation of 
RegDGCNN model +2025-07-08 08:16:26,755 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-08 08:16:27,154 - INFO - Using device: cuda:0 +2025-07-08 08:16:27,393 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-08 08:16:27,415 - INFO - ******************** +2025-07-08 08:16:27,453 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-08 08:16:27,814 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 08:16:27,814 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 08:16:27,841 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-08 08:16:27,846 - INFO - Sample: N_S_WWS_WM_292 +2025-07-08 08:16:27,846 - INFO - Max Error: 474.005371 +2025-07-08 08:16:27,846 - INFO - Mean Error: 19.724190 +2025-07-08 08:16:27,846 - INFO - Std Error: 27.830032 +2025-07-08 08:16:27,861 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-08 08:16:27,919 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 08:16:27,919 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 08:16:27,949 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-08 08:16:27,950 - INFO - Sample: N_S_WWS_WM_215 +2025-07-08 08:16:27,950 - INFO - Max Error: 475.202637 +2025-07-08 08:16:27,950 - INFO - Mean Error: 18.083265 +2025-07-08 08:16:27,950 - INFO - Std Error: 27.598852 +2025-07-08 
08:16:27,965 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-08 08:16:28,004 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 08:16:28,004 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 08:16:28,013 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-08 08:16:28,013 - INFO - Sample: N_S_WWS_WM_073 +2025-07-08 08:16:28,013 - INFO - Max Error: 7346.312012 +2025-07-08 08:16:28,013 - INFO - Mean Error: 20.367575 +2025-07-08 08:16:28,013 - INFO - Std Error: 106.303505 +2025-07-08 08:16:28,031 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-08 08:16:28,086 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 08:16:28,086 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 08:16:28,093 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-08 08:16:28,093 - INFO - Sample: N_S_WWS_WM_323 +2025-07-08 08:16:28,093 - INFO - Max Error: 558.258301 +2025-07-08 08:16:28,093 - INFO - Mean Error: 19.368584 +2025-07-08 08:16:28,093 - INFO - Std Error: 31.317421 +2025-07-08 08:16:28,108 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-08 08:16:28,158 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 08:16:28,158 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 08:16:28,164 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-08 08:16:28,164 - INFO - Sample: N_S_WWS_WM_240 +2025-07-08 08:16:28,164 - INFO - Max Error: 568.939941 +2025-07-08 08:16:28,165 - INFO - Mean Error: 20.642050 +2025-07-08 08:16:28,165 - INFO - Std Error: 28.538563 +2025-07-08 08:16:28,188 - INFO - Evaluation complete, Results save to results/Train_Test +2025-07-08 08:16:28,188 - 
INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-08 08:16:28,188 - INFO - Evaluation Results: +2025-07-08 08:16:28,188 - INFO - MSE_mean: 0.240974 +2025-07-08 08:16:28,188 - INFO - MSE_std: 0.305666 +2025-07-08 08:16:28,188 - INFO - MAE_mean: 0.167481 +2025-07-08 08:16:28,188 - INFO - MAE_std: 0.007664 +2025-07-08 08:16:28,188 - INFO - RMSE_mean: 0.421983 +2025-07-08 08:16:28,188 - INFO - RMSE_std: 0.250806 +2025-07-08 08:16:28,188 - INFO - Max_Error_mean: 16.072868 +2025-07-08 08:16:28,188 - INFO - Max_Error_std: 23.293617 +2025-07-08 08:16:28,188 - INFO - Rel_L2_mean: 1.924997 +2025-07-08 08:16:28,188 - INFO - Rel_L2_std: 0.335800 +2025-07-08 08:16:28,188 - INFO - Rel_L1_mean: 1.924997 +2025-07-08 08:16:28,188 - INFO - Rel_L1_std: 0.335800 +2025-07-08 08:17:43,341 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-08 08:17:43,347 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-08 08:17:43,764 - INFO - Using device: cuda:0 +2025-07-08 08:17:43,993 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-08 08:17:44,013 - INFO - ******************** +2025-07-08 08:17:44,030 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-08 08:22:16,885 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-08 08:22:16,892 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': 
'/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-08 08:22:17,321 - INFO - Using device: cuda:0 +2025-07-08 08:22:17,560 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-08 08:22:17,587 - INFO - ******************** +2025-07-08 08:22:17,625 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-08 08:22:18,083 - INFO - targets: torch.Size([1, 10000]) +2025-07-08 08:22:18,083 - INFO - outputs: torch.Size([1, 10000]) +2025-07-08 08:22:18,083 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 08:22:18,083 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 08:22:18,109 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-08 08:22:18,109 - INFO - Sample: N_S_WWS_WM_292 +2025-07-08 08:22:18,109 - INFO - Max Error: 474.005371 +2025-07-08 08:22:18,109 - INFO - Mean Error: 19.724190 +2025-07-08 08:22:18,109 - INFO - Std Error: 27.830032 +2025-07-08 08:22:18,110 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-08 08:22:18,149 - INFO - targets: torch.Size([1, 10000]) +2025-07-08 08:22:18,150 - INFO - outputs: torch.Size([1, 10000]) +2025-07-08 08:22:18,150 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 08:22:18,150 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 08:22:18,159 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-08 08:22:18,159 - INFO - Sample: N_S_WWS_WM_215 +2025-07-08 08:22:18,159 - INFO - Max Error: 475.202637 +2025-07-08 08:22:18,159 - 
INFO - Mean Error: 18.083265 +2025-07-08 08:22:18,159 - INFO - Std Error: 27.598852 +2025-07-08 08:22:18,160 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-08 08:22:18,199 - INFO - targets: torch.Size([1, 10000]) +2025-07-08 08:22:18,204 - INFO - outputs: torch.Size([1, 10000]) +2025-07-08 08:22:18,204 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 08:22:18,204 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 08:22:18,212 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-08 08:22:18,215 - INFO - Sample: N_S_WWS_WM_073 +2025-07-08 08:22:18,215 - INFO - Max Error: 7346.312012 +2025-07-08 08:22:18,215 - INFO - Mean Error: 20.367575 +2025-07-08 08:22:18,215 - INFO - Std Error: 106.303505 +2025-07-08 08:22:18,217 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-08 08:22:18,255 - INFO - targets: torch.Size([1, 10000]) +2025-07-08 08:22:18,255 - INFO - outputs: torch.Size([1, 10000]) +2025-07-08 08:22:18,256 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 08:22:18,256 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 08:22:18,263 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-08 08:22:18,263 - INFO - Sample: N_S_WWS_WM_323 +2025-07-08 08:22:18,264 - INFO - Max Error: 558.258301 +2025-07-08 08:22:18,264 - INFO - Mean Error: 19.368584 +2025-07-08 08:22:18,264 - INFO - Std Error: 31.317421 +2025-07-08 08:22:18,265 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-08 08:22:18,303 - INFO - targets: torch.Size([1, 10000]) +2025-07-08 08:22:18,304 - INFO - outputs: torch.Size([1, 10000]) +2025-07-08 08:22:18,304 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 08:22:18,304 - INFO - 
pred_pressure_np.shape: (10000,) +2025-07-08 08:22:18,312 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-08 08:22:18,312 - INFO - Sample: N_S_WWS_WM_240 +2025-07-08 08:22:18,312 - INFO - Max Error: 568.939941 +2025-07-08 08:22:18,312 - INFO - Mean Error: 20.642050 +2025-07-08 08:22:18,312 - INFO - Std Error: 28.538563 +2025-07-08 08:22:18,325 - INFO - Evaluation complete, Results save to results/Train_Test +2025-07-08 08:22:18,326 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-08 08:22:18,326 - INFO - Evaluation Results: +2025-07-08 08:22:18,326 - INFO - MSE_mean: 0.240974 +2025-07-08 08:22:18,326 - INFO - MSE_std: 0.305666 +2025-07-08 08:22:18,326 - INFO - MAE_mean: 0.167481 +2025-07-08 08:22:18,326 - INFO - MAE_std: 0.007664 +2025-07-08 08:22:18,326 - INFO - RMSE_mean: 0.421983 +2025-07-08 08:22:18,326 - INFO - RMSE_std: 0.250806 +2025-07-08 08:22:18,326 - INFO - Max_Error_mean: 16.072868 +2025-07-08 08:22:18,326 - INFO - Max_Error_std: 23.293617 +2025-07-08 08:22:18,326 - INFO - Rel_L2_mean: 1.924997 +2025-07-08 08:22:18,326 - INFO - Rel_L2_std: 0.335800 +2025-07-08 08:22:18,326 - INFO - Rel_L1_mean: 1.924997 +2025-07-08 08:22:18,326 - INFO - Rel_L1_std: 0.335800 +2025-07-08 08:30:57,653 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-08 08:30:57,660 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-08 08:30:58,056 - INFO - Using device: cuda:0 +2025-07-08 08:30:58,280 - INFO - Loading model form 
experiments/Train_Test/best_model_pth +2025-07-08 08:30:58,301 - INFO - ******************** +2025-07-08 08:30:58,337 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-08 08:30:58,688 - INFO - targets: torch.Size([1, 10000]) +2025-07-08 08:30:58,693 - INFO - outputs: torch.Size([1, 10000]) +2025-07-08 08:30:58,693 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 08:30:58,693 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 08:30:58,721 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-08 08:30:58,721 - INFO - Sample: N_S_WWS_WM_292 +2025-07-08 08:30:58,721 - INFO - Max Error: 474.005371 +2025-07-08 08:30:58,721 - INFO - Mean Error: 19.724190 +2025-07-08 08:30:58,721 - INFO - Std Error: 27.830032 +2025-07-08 08:30:58,722 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-08 08:30:58,761 - INFO - targets: torch.Size([1, 10000]) +2025-07-08 08:30:58,761 - INFO - outputs: torch.Size([1, 10000]) +2025-07-08 08:30:58,762 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 08:30:58,762 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 08:30:58,770 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-08 08:30:58,771 - INFO - Sample: N_S_WWS_WM_215 +2025-07-08 08:30:58,771 - INFO - Max Error: 475.202637 +2025-07-08 08:30:58,771 - INFO - Mean Error: 18.083265 +2025-07-08 08:30:58,771 - INFO - Std Error: 27.598852 +2025-07-08 08:30:58,772 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-08 08:30:58,811 - INFO - targets: torch.Size([1, 10000]) +2025-07-08 08:30:58,811 - INFO - outputs: torch.Size([1, 10000]) +2025-07-08 08:30:58,811 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 08:30:58,811 - INFO - 
pred_pressure_np.shape: (10000,) +2025-07-08 08:30:58,819 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-08 08:30:58,822 - INFO - Sample: N_S_WWS_WM_073 +2025-07-08 08:30:58,822 - INFO - Max Error: 7346.312012 +2025-07-08 08:30:58,822 - INFO - Mean Error: 20.367575 +2025-07-08 08:30:58,822 - INFO - Std Error: 106.303505 +2025-07-08 08:30:58,823 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-08 08:30:58,862 - INFO - targets: torch.Size([1, 10000]) +2025-07-08 08:30:58,862 - INFO - outputs: torch.Size([1, 10000]) +2025-07-08 08:30:58,862 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 08:30:58,862 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 08:30:58,869 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-08 08:30:58,869 - INFO - Sample: N_S_WWS_WM_323 +2025-07-08 08:30:58,869 - INFO - Max Error: 558.258301 +2025-07-08 08:30:58,869 - INFO - Mean Error: 19.368584 +2025-07-08 08:30:58,869 - INFO - Std Error: 31.317421 +2025-07-08 08:30:58,870 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-08 08:30:58,906 - INFO - targets: torch.Size([1, 10000]) +2025-07-08 08:30:58,906 - INFO - outputs: torch.Size([1, 10000]) +2025-07-08 08:30:58,906 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 08:30:58,906 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 08:30:58,913 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-08 08:30:58,913 - INFO - Sample: N_S_WWS_WM_240 +2025-07-08 08:30:58,913 - INFO - Max Error: 568.939941 +2025-07-08 08:30:58,913 - INFO - Mean Error: 20.642050 +2025-07-08 08:30:58,913 - INFO - Std Error: 28.538563 +2025-07-08 08:30:58,931 - INFO - Evaluation complete, Results save to 
results/Train_Test +2025-07-08 08:30:58,931 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-08 08:30:58,931 - INFO - Evaluation Results: +2025-07-08 08:30:58,932 - INFO - MSE_mean: 0.240974 +2025-07-08 08:30:58,932 - INFO - MSE_std: 0.305666 +2025-07-08 08:30:58,932 - INFO - MAE_mean: 0.167481 +2025-07-08 08:30:58,932 - INFO - MAE_std: 0.007664 +2025-07-08 08:30:58,932 - INFO - RMSE_mean: 0.421983 +2025-07-08 08:30:58,932 - INFO - RMSE_std: 0.250806 +2025-07-08 08:30:58,932 - INFO - Max_Error_mean: 16.072868 +2025-07-08 08:30:58,932 - INFO - Max_Error_std: 23.293617 +2025-07-08 08:30:58,932 - INFO - Rel_L2_mean: 1.924997 +2025-07-08 08:30:58,932 - INFO - Rel_L2_std: 0.335800 +2025-07-08 08:30:58,932 - INFO - Rel_L1_mean: 1.924997 +2025-07-08 08:30:58,932 - INFO - Rel_L1_std: 0.335800 +2025-07-08 10:45:24,792 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-08 10:45:24,801 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-08 10:45:25,202 - INFO - Using device: cuda:0 +2025-07-08 10:45:25,454 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-08 10:45:25,484 - INFO - ******************** +2025-07-08 10:45:25,584 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-08 10:45:26,164 - INFO - targets: torch.Size([1, 10000]) +2025-07-08 10:45:26,164 - INFO - outputs: torch.Size([1, 10000]) +2025-07-08 10:45:26,164 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 10:45:26,164 - 
INFO - pred_pressure_np.shape: (10000,) +2025-07-08 10:45:26,174 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-08 10:45:26,174 - INFO - Sample: N_S_WWS_WM_292 +2025-07-08 10:45:26,174 - INFO - Max Error: 474.005371 +2025-07-08 10:45:26,174 - INFO - Mean Error: 19.724190 +2025-07-08 10:45:26,174 - INFO - Std Error: 27.830032 +2025-07-08 10:45:26,175 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-08 10:45:26,230 - INFO - targets: torch.Size([1, 10000]) +2025-07-08 10:45:26,230 - INFO - outputs: torch.Size([1, 10000]) +2025-07-08 10:45:26,231 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 10:45:26,231 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 10:45:26,240 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-08 10:45:26,240 - INFO - Sample: N_S_WWS_WM_215 +2025-07-08 10:45:26,240 - INFO - Max Error: 475.202637 +2025-07-08 10:45:26,240 - INFO - Mean Error: 18.083265 +2025-07-08 10:45:26,240 - INFO - Std Error: 27.598852 +2025-07-08 10:45:26,241 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-08 10:45:26,280 - INFO - targets: torch.Size([1, 10000]) +2025-07-08 10:45:26,285 - INFO - outputs: torch.Size([1, 10000]) +2025-07-08 10:45:26,285 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 10:45:26,286 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 10:45:26,295 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-08 10:45:26,297 - INFO - Sample: N_S_WWS_WM_073 +2025-07-08 10:45:26,297 - INFO - Max Error: 7346.312012 +2025-07-08 10:45:26,297 - INFO - Mean Error: 20.367575 +2025-07-08 10:45:26,297 - INFO - Std Error: 106.303505 +2025-07-08 10:45:26,299 - INFO - Loading cached data from 
/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-08 10:45:26,366 - INFO - targets: torch.Size([1, 10000]) +2025-07-08 10:45:26,366 - INFO - outputs: torch.Size([1, 10000]) +2025-07-08 10:45:26,366 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 10:45:26,366 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 10:45:26,375 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-08 10:45:26,375 - INFO - Sample: N_S_WWS_WM_323 +2025-07-08 10:45:26,375 - INFO - Max Error: 558.258301 +2025-07-08 10:45:26,375 - INFO - Mean Error: 19.368584 +2025-07-08 10:45:26,375 - INFO - Std Error: 31.317421 +2025-07-08 10:45:26,376 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-08 10:45:26,428 - INFO - targets: torch.Size([1, 10000]) +2025-07-08 10:45:26,428 - INFO - outputs: torch.Size([1, 10000]) +2025-07-08 10:45:26,428 - INFO - true_pressure_np.shape: (10000,) +2025-07-08 10:45:26,428 - INFO - pred_pressure_np.shape: (10000,) +2025-07-08 10:45:26,438 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-08 10:45:26,438 - INFO - Sample: N_S_WWS_WM_240 +2025-07-08 10:45:26,438 - INFO - Max Error: 568.939941 +2025-07-08 10:45:26,438 - INFO - Mean Error: 20.642050 +2025-07-08 10:45:26,438 - INFO - Std Error: 28.538563 +2025-07-08 10:45:26,446 - INFO - Evaluation complete, Results save to results/Train_Test +2025-07-08 10:45:26,447 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-08 10:45:26,447 - INFO - Evaluation Results: +2025-07-08 10:45:26,447 - INFO - MSE_mean: 0.240974 +2025-07-08 10:45:26,447 - INFO - MSE_std: 0.305666 +2025-07-08 10:45:26,447 - INFO - MAE_mean: 0.167481 +2025-07-08 10:45:26,447 - INFO - MAE_std: 0.007664 +2025-07-08 10:45:26,447 - INFO - RMSE_mean: 0.421983 +2025-07-08 10:45:26,447 - 
INFO - RMSE_std: 0.250806 +2025-07-08 10:45:26,447 - INFO - Max_Error_mean: 16.072868 +2025-07-08 10:45:26,447 - INFO - Max_Error_std: 23.293617 +2025-07-08 10:45:26,447 - INFO - Rel_L2_mean: 1.924997 +2025-07-08 10:45:26,447 - INFO - Rel_L2_std: 0.335800 +2025-07-08 10:45:26,447 - INFO - Rel_L1_mean: 1.924997 +2025-07-08 10:45:26,447 - INFO - Rel_L1_std: 0.335800 +2025-07-09 11:39:41,247 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-09 11:39:41,251 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 100000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-09 11:39:41,340 - INFO - Using device: cuda:0 +2025-07-09 11:39:41,604 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-09 11:39:41,639 - INFO - ******************** +2025-07-09 11:39:41,652 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-11 17:25:45,213 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-11 17:25:45,215 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 100000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-11 17:25:45,234 - INFO - Using device: cuda:0 +2025-07-11 17:25:45,416 - 
INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-11 17:25:45,437 - INFO - ******************** +2025-07-11 17:25:45,445 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-11 17:25:45,772 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 17:25:45,772 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 17:25:45,773 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 17:25:45,773 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 17:25:45,778 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-11 17:25:45,778 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-11 17:25:45,845 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 17:25:45,845 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 17:25:45,845 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 17:25:45,845 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 17:25:45,850 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-11 17:25:45,850 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-11 17:25:45,915 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 17:25:45,915 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 17:25:45,915 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 17:25:45,915 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 17:25:45,920 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-11 17:25:45,920 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-11 17:25:45,984 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 17:25:45,984 - INFO - outputs: torch.Size([1, 
10000]) +2025-07-11 17:25:45,984 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 17:25:45,984 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 17:25:45,989 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-11 17:25:45,989 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-11 17:25:46,053 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 17:25:46,053 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 17:25:46,053 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 17:25:46,053 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 17:25:46,058 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-11 17:25:46,077 - INFO - Evaluation complete, Results save to results/Train_Test +2025-07-11 17:25:46,077 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-11 17:25:46,077 - INFO - Evaluation Results: +2025-07-11 17:25:46,077 - INFO - MSE_mean: 0.241383 +2025-07-11 17:25:46,077 - INFO - MSE_std: 0.307004 +2025-07-11 17:25:46,077 - INFO - MAE_mean: 0.167414 +2025-07-11 17:25:46,077 - INFO - MAE_std: 0.007522 +2025-07-11 17:25:46,077 - INFO - RMSE_mean: 0.421962 +2025-07-11 17:25:46,077 - INFO - RMSE_std: 0.251656 +2025-07-11 17:25:46,077 - INFO - Max_Error_mean: 16.080824 +2025-07-11 17:25:46,077 - INFO - Max_Error_std: 23.295095 +2025-07-11 17:25:46,078 - INFO - Rel_L2_mean: 1.932155 +2025-07-11 17:25:46,078 - INFO - Rel_L2_std: 0.345641 +2025-07-11 17:25:46,078 - INFO - Rel_L1_mean: 1.932155 +2025-07-11 17:25:46,078 - INFO - Rel_L1_std: 0.345641 +2025-07-11 18:13:28,038 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-11 18:13:28,040 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': 
'/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 100000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-11 18:13:28,059 - INFO - Using device: cuda:0 +2025-07-11 18:13:28,241 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-11 18:13:28,262 - INFO - ******************** +2025-07-11 18:13:28,270 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-11 18:13:28,589 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 18:13:28,589 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 18:13:28,590 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 18:13:28,590 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 18:13:28,595 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-11 18:13:28,596 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-11 18:13:28,660 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 18:13:28,661 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 18:13:28,667 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 18:13:28,673 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 18:13:28,684 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-11 18:13:28,686 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-11 18:13:28,755 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 18:13:28,756 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 18:13:28,756 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 18:13:28,757 
- INFO - pred_pressure_np.shape: (10000,) +2025-07-11 18:13:28,761 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-11 18:13:28,762 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-11 18:13:28,826 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 18:13:28,826 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 18:13:28,826 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 18:13:28,826 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 18:13:28,831 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-11 18:13:28,831 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-11 18:13:28,894 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 18:13:28,894 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 18:13:28,895 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 18:13:28,895 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 18:13:28,899 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-11 18:13:28,902 - INFO - Evaluation complete, Results save to results/Train_Test +2025-07-11 18:13:28,902 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-11 18:13:28,902 - INFO - Evaluation Results: +2025-07-11 18:13:28,902 - INFO - MSE_mean: 0.242434 +2025-07-11 18:13:28,902 - INFO - MSE_std: 0.307976 +2025-07-11 18:13:28,902 - INFO - MAE_mean: 0.167413 +2025-07-11 18:13:28,902 - INFO - MAE_std: 0.007127 +2025-07-11 18:13:28,902 - INFO - RMSE_mean: 0.423062 +2025-07-11 18:13:28,902 - INFO - RMSE_std: 0.251898 +2025-07-11 18:13:28,902 - INFO - Max_Error_mean: 16.225632 +2025-07-11 18:13:28,902 - INFO - Max_Error_std: 23.296082 +2025-07-11 18:13:28,902 - INFO - Rel_L2_mean: 1.861108 +2025-07-11 
18:13:28,902 - INFO - Rel_L2_std: 0.297842 +2025-07-11 18:13:28,902 - INFO - Rel_L1_mean: 1.861108 +2025-07-11 18:13:28,902 - INFO - Rel_L1_std: 0.297842 +2025-07-11 18:53:31,140 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-11 18:53:31,143 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 100000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-11 18:53:31,229 - INFO - Using device: cuda:0 +2025-07-11 18:53:31,426 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-11 18:53:31,447 - INFO - ******************** +2025-07-11 18:53:31,458 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-11 18:53:31,854 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 18:53:31,857 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 18:53:31,857 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 18:53:31,857 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 18:53:31,862 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-11 18:53:31,863 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-11 18:53:31,930 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 18:53:31,930 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 18:53:31,930 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 18:53:31,930 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 18:53:31,935 - INFO - Saved raw prdiction data to 
results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-11 18:53:31,935 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-11 18:53:32,001 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 18:53:32,001 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 18:53:32,002 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 18:53:32,002 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 18:53:32,006 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-11 18:53:32,007 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-11 18:53:32,072 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 18:53:32,072 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 18:53:32,072 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 18:53:32,074 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 18:53:32,078 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-11 18:53:32,078 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-11 18:53:32,142 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 18:53:32,142 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 18:53:32,143 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 18:53:32,143 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 18:53:32,147 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-11 18:53:32,149 - INFO - Evaluation complete, Results save to results/Train_Test +2025-07-11 18:53:32,150 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-11 18:53:32,150 - INFO - Evaluation Results: +2025-07-11 18:53:32,150 - INFO - MSE_mean: 0.242434 +2025-07-11 
18:53:32,150 - INFO - MSE_std: 0.307976 +2025-07-11 18:53:32,150 - INFO - MAE_mean: 0.167413 +2025-07-11 18:53:32,150 - INFO - MAE_std: 0.007127 +2025-07-11 18:53:32,150 - INFO - RMSE_mean: 0.423062 +2025-07-11 18:53:32,150 - INFO - RMSE_std: 0.251898 +2025-07-11 18:53:32,150 - INFO - Max_Error_mean: 16.225632 +2025-07-11 18:53:32,150 - INFO - Max_Error_std: 23.296082 +2025-07-11 18:53:32,150 - INFO - Rel_L2_mean: 1.861108 +2025-07-11 18:53:32,150 - INFO - Rel_L2_std: 0.297842 +2025-07-11 18:53:32,150 - INFO - Rel_L1_mean: 1.861108 +2025-07-11 18:53:32,150 - INFO - Rel_L1_std: 0.297842 +2025-07-11 18:58:55,639 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-11 18:58:55,641 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 100000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-11 18:58:55,736 - INFO - Using device: cuda:0 +2025-07-11 18:58:55,924 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-11 18:58:55,948 - INFO - ******************** +2025-07-11 18:58:55,957 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-11 18:58:56,344 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 18:58:56,346 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 18:58:56,347 - INFO - true_pressure_np.shape: (10000, 3) +2025-07-11 18:58:56,347 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 18:58:56,347 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 18:58:56,352 - INFO - Saved raw prdiction data to 
results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-11 18:58:56,353 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-11 18:58:56,419 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 18:58:56,419 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 18:58:56,419 - INFO - true_pressure_np.shape: (10000, 3) +2025-07-11 18:58:56,419 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 18:58:56,419 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 18:58:56,424 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-11 18:58:56,424 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-11 18:58:56,489 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 18:58:56,489 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 18:58:56,490 - INFO - true_pressure_np.shape: (10000, 3) +2025-07-11 18:58:56,490 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 18:58:56,490 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 18:58:56,493 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-11 18:58:56,494 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-11 18:58:56,560 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 18:58:56,560 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 18:58:56,560 - INFO - true_pressure_np.shape: (10000, 3) +2025-07-11 18:58:56,560 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 18:58:56,561 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 18:58:56,565 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-11 18:58:56,565 - INFO - Loading cached data from 
/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-11 18:58:56,628 - INFO - targets: torch.Size([1, 10000]) +2025-07-11 18:58:56,628 - INFO - outputs: torch.Size([1, 10000]) +2025-07-11 18:58:56,629 - INFO - true_pressure_np.shape: (10000, 3) +2025-07-11 18:58:56,629 - INFO - true_pressure_np.shape: (10000,) +2025-07-11 18:58:56,629 - INFO - pred_pressure_np.shape: (10000,) +2025-07-11 18:58:56,632 - INFO - Saved raw prdiction data to results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-11 18:58:56,634 - INFO - Evaluation complete, Results save to results/Train_Test +2025-07-11 18:58:56,634 - INFO - Raw prediction data saved to results/Train_Test/prediction_data +2025-07-11 18:58:56,634 - INFO - Evaluation Results: +2025-07-11 18:58:56,634 - INFO - MSE_mean: 0.242434 +2025-07-11 18:58:56,634 - INFO - MSE_std: 0.307976 +2025-07-11 18:58:56,634 - INFO - MAE_mean: 0.167413 +2025-07-11 18:58:56,635 - INFO - MAE_std: 0.007127 +2025-07-11 18:58:56,635 - INFO - RMSE_mean: 0.423062 +2025-07-11 18:58:56,635 - INFO - RMSE_std: 0.251898 +2025-07-11 18:58:56,635 - INFO - Max_Error_mean: 16.225632 +2025-07-11 18:58:56,635 - INFO - Max_Error_std: 23.296082 +2025-07-11 18:58:56,635 - INFO - Rel_L2_mean: 1.861108 +2025-07-11 18:58:56,635 - INFO - Rel_L2_std: 0.297842 +2025-07-11 18:58:56,635 - INFO - Rel_L1_mean: 1.861108 +2025-07-11 18:58:56,635 - INFO - Rel_L1_std: 0.297842 +2025-07-11 19:03:29,949 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-11 19:03:29,950 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 100000, + 'num_vis_samples': 5, + 'output_channels': 1, + 
'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-11 19:03:30,034 - INFO - Using device: cuda:0 +2025-07-11 19:03:30,225 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-11 19:03:30,248 - INFO - ******************** +2025-07-11 19:03:30,277 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-11 19:05:09,547 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-11 19:05:09,549 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 100000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-11 19:05:09,646 - INFO - Using device: cuda:0 +2025-07-11 19:05:09,835 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-11 19:05:09,859 - INFO - ******************** +2025-07-11 19:05:09,868 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-12 08:06:52,287 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-12 08:06:52,290 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 50000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-12 08:06:52,525 - INFO - Using device: cuda:0 
+2025-07-12 08:06:52,779 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-12 08:06:52,812 - INFO - ******************** +2025-07-12 08:06:52,938 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-12 08:07:47,397 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-12 08:07:47,398 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 50000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-12 08:07:47,598 - INFO - Using device: cuda:0 +2025-07-12 08:07:47,834 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-12 08:07:47,860 - INFO - ******************** +2025-07-12 08:07:47,870 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-12 08:50:50,942 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-12 08:50:50,946 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 50000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-12 08:50:51,351 - INFO - Using device: cuda:0 +2025-07-12 08:50:51,633 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-12 
08:50:51,665 - INFO - ******************** +2025-07-12 08:50:51,712 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-12 08:51:36,878 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-12 08:51:36,881 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 10000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-12 08:51:37,085 - INFO - Using device: cuda:0 +2025-07-12 08:51:37,337 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-12 08:51:37,363 - INFO - ******************** +2025-07-12 08:51:37,417 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-14 08:18:59,078 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-07-14 08:18:59,084 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 100000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-07-14 08:18:59,125 - INFO - Using device: cuda:0 +2025-07-14 08:18:59,356 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-07-14 08:18:59,374 - INFO - ******************** +2025-07-14 08:18:59,398 - INFO - Loading cached data from 
/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-08-07 12:22:31,153 - INFO - **************************** Starting evaluation of RegDGCNN model +2025-08-07 12:22:31,165 - INFO - Arguments: +{ 'cache_dir': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', + 'dataset_path': '/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', + 'dropout': 0.4, + 'emb_dims': 1024, + 'exp_name': 'Train_Test', + 'k': 40, + 'model_checkpoint': 'experiments/Train_Test/best_model_pth', + 'num_points': 50000, + 'num_vis_samples': 5, + 'output_channels': 1, + 'sample_ids': None, + 'seed': 1, + 'visualize': True} +2025-08-07 12:22:31,186 - INFO - Using device: cuda:0 +2025-08-07 12:22:31,402 - INFO - Loading model form experiments/Train_Test/best_model_pth +2025-08-07 12:22:31,489 - INFO - ******************** diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/evaluation_metrics.txt b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/evaluation_metrics.txt new file mode 100644 index 0000000..c6cee1d --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/evaluation_metrics.txt @@ -0,0 +1,16 @@ +Evaluation Metrics for RegDGCNN +Model Checkpoint: experiments/Train_Test/best_model_pth +Number of samples: 5 + +MSE_mean: 0.242434 +MSE_std: 0.307976 +MAE_mean: 0.167413 +MAE_std: 0.007127 +RMSE_mean: 0.423062 +RMSE_std: 0.251898 +Max_Error_mean: 16.225632 +Max_Error_std: 23.296082 +Rel_L2_mean: 1.861108 +Rel_L2_std: 0.297842 +Rel_L1_mean: 1.861108 +Rel_L1_std: 0.297842 diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/292_Pred.png b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/292_Pred.png new file mode 100644 index 0000000..d65a6a4 Binary files /dev/null and b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/292_Pred.png differ diff --git 
a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/292_Pred.vtk b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/292_Pred.vtk new file mode 100644 index 0000000..4f2ea89 Binary files /dev/null and b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/292_Pred.vtk differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/292_True.png b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/292_True.png new file mode 100644 index 0000000..6979023 Binary files /dev/null and b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/292_True.png differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/292_True.vtk b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/292_True.vtk new file mode 100644 index 0000000..a077d71 Binary files /dev/null and b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/292_True.vtk differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/pointNet_To_vtk.py b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/pointNet_To_vtk.py new file mode 100644 index 0000000..573e68c --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/pointNet_To_vtk.py @@ -0,0 +1,28 @@ +import numpy as np +import pyvista as pv +pv.OFF_SCREEN = True # Disable interactive display + +def write_pointnet_vtk(points, + pressure, + fname: str = "pressure_cloud.vtk", + binary: bool = True) -> None: + """ + :param points: (N,3) float array of xyz positions + :param pressure: (N,) or (N,1) float array per point + """ + assert points.ndim == 2 and points.shape[1] == 3, \ + f"points should be (N,3), got {points.shape}" + pressure = pressure.reshape(-1) + assert pressure.shape[0] == points.shape[0], \ + "pressure must have same N as points" + + # Wrap into a PyVista PolyData + cloud = pv.PolyData(points) + + # Attach 
per-point scalar array + cloud["pressure"] = pressure + + # Save to .vtk (legacy VTK format) + cloud.save(fname, binary=binary) + print(f"Saved {points.shape[0]} points with 'pressure' → {fname}") + diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/test.py b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/test.py new file mode 100644 index 0000000..72f62d0 --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/postProcess/test.py @@ -0,0 +1,17 @@ +import sys +import os +sys.path.append(os.path.dirname(os.path.abspath(__file__))) +from pointNet_To_vtk import write_pointnet_vtk +import numpy as np +import pyvista as pv +pv.OFF_SCREEN = True # Disable interactive display + +DataPath = os.path.expandvars('$HOME/ML_Turbulent/DrivAerNet/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz') +data = np.load(DataPath) +points = data['points'] # shape (N_points, 3) +true_p = data['true_pressure_np'] # shape (N_points,) +pred_p = data['pred_pressure_np'] # shape (N_points,) + +# True pressure +write_pointnet_vtk(points, true_p, fname="292_True.vtk") +write_pointnet_vtk(points, pred_p, fname="292_Pred.vtk") diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz new file mode 100644 index 0000000..1c5a72b Binary files /dev/null and b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_073_prediction_data.npz differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz new file mode 100644 index 0000000..ede1ff3 Binary files /dev/null and 
b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_215_prediction_data.npz differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz new file mode 100644 index 0000000..d81a30a Binary files /dev/null and b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_240_prediction_data.npz differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz new file mode 100644 index 0000000..fc2d4b6 Binary files /dev/null and b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_292_prediction_data.npz differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz new file mode 100644 index 0000000..f4b9e10 Binary files /dev/null and b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/prediction_data/N_S_WWS_WM_323_prediction_data.npz differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/visualization.png b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/visualization.png new file mode 100644 index 0000000..4bd46ae Binary files /dev/null and b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/visualization.png differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/visualization/matplotlib_version.png b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/visualization/matplotlib_version.png new file mode 100644 index 0000000..7960dc4 Binary files /dev/null and 
b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/visualization/matplotlib_version.png differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/visualization/visualization.png b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/visualization/visualization.png new file mode 100644 index 0000000..7960dc4 Binary files /dev/null and b/RegDGCNN_SurfaceFields/My_python_job/results/Train_Test/visualization/visualization.png differ diff --git a/RegDGCNN_SurfaceFields/My_python_job/run_pipeline.py b/RegDGCNN_SurfaceFields/My_python_job/run_pipeline.py new file mode 100644 index 0000000..8fc7d0b --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/run_pipeline.py @@ -0,0 +1,257 @@ +#!/usr/bin/env python3 +# run_pipeline.py + +import os +import argparse +import subprocess +import logging +import time +import pprint +from datetime import datetime +from utils import setup_logger +from colorama import Fore, Style + + + +#logging.basicConfig( +# level=logging.INFO, # <-- This enables logging.info() +# format='[%(asctime)s] %(levelname)s: %(message)s', +# datefmt='%H:%M:%S' +#) + +def parse_args(): + + parser = argparse.ArgumentParser(description="Test") + + # Pipeline control + parser.add_argument('--stages', type=str, default='all', + choices=['preprocess', 'train', 'evaluate', 'all'], + help='Pipeline stages to run') + + # Basic settings + parser.add_argument('--exp_name', type=str, required=True, help="Test") + parser.add_argument('--seed', type=int, default=1, help='Random seed') + + # Data settings + parser.add_argument('--dataset_path', type=str, help='Path to dataset') + parser.add_argument('--subset_dir', type=str, help='Path to train/val/test splits') + parser.add_argument('--cache_dir', type=str, help='Path to cache directory') + parser.add_argument('--num_points', type=int, default=10000, help='Number of points to sample') + + # Training settings + parser.add_argument('--batch_size', type=int, default=12, help='Batch size per GPU') 
+ parser.add_argument('--epochs', type=int, default=150, help='Number of epochs') + parser.add_argument('--lr', type=float, default=0.001, help='Learning rate') + parser.add_argument('--test_only', type=int, default=0, help='Only test the model, no training') + parser.add_argument('--num_workers', type=int, default=4, help='Number of data loading workers') + parser.add_argument('--gpus', type=str, default='0', help='GPUs to use (comma-separated)') + + # Model settings + parser.add_argument('--dropout', type=float, default=0.4, help='Dropout rate') + parser.add_argument('--emb_dims', type=int, default=1024, help='Embedding dimensions') + parser.add_argument('--k', type=int, default=40, help='Number of nearest neighbors') + parser.add_argument('--output_channels', type=int, default=1, help='Number of output channels') + + # Evaluation settings + parser.add_argument('--num_eval_samples', type=int, default=5, help='Number of samples to evaluate in detail') + + return parser.parse_args() + +def preprocess_data(args): + + """ + Preprocess the dataset to create cached point cloud data. 
+ + Args: + True if preprocessing was successful, False otherwise + """ + + logging.info("**************************Starting data preprocessing...") + + # Create cache directory if it doesn't exist + cache_dir = args.cache_dir or os.path.join(args.dataset_path, "processed_data") + os.makedirs(cache_dir, exist_ok=True) + + try: + # Import required modules for preprocessing + from data_loader import SurfacePressureDataset + + # Create the dataset with preprocessing enabled + dataset = SurfacePressureDataset( + root_dir = args.dataset_path, + num_points = args.num_points, + preprocess = True, + cache_dir = cache_dir + ) + + # Process all files + logging.info(f"Processing {len(dataset.vtk_files)} VTK files with {args.num_points} points per sample") + for ii, vtk_file in enumerate(dataset.vtk_files): + logging.info(f"Processing file {ii+1} / {len(dataset.vtk_files)}: {os.path.basename(vtk_file)}") + _ = dataset[ii] # This will trigger preprocessing and caching + + logging.info(f"{Fore.MAGENTA}Data preprocessing complete. 
Cache data saved to {cache_dir}{Style.RESET_ALL}") + return True + except Exception as e: + logging.error(f"Preprocessing failed with error: {e}") + return False + +def train_model(args): + logging.info("*************************Starting model training...") + + # Prepare command for training script + cmd = [ + "python", "train.py", + "--exp_name", args.exp_name, + "--dataset_path", args.dataset_path, + "--subset_dir", args.subset_dir, + "--num_points", str(args.num_points), + "--batch_size", str(args.batch_size), + "--epochs", str(args.epochs), + "--lr", str(args.lr), + "--dropout", str(args.dropout), + "--emb_dims", str(args.emb_dims), + "--k", str(args.k), + "--output_channels", str(args.output_channels), + "--seed", str(args.seed), + "--num_workers", str(args.num_workers), + "--test_only", str(args.test_only) + ] + + if args.cache_dir: + cmd.extend(["--cache_dir", args.cache_dir]) + + # Set up environment variables for distributed training + env = os.environ.copy() + env["CUDA_VISIBLE_DEVICES"] = args.gpus + + # Run the training script + start_time = time.time() + process = subprocess.Popen(cmd, env=env) + process.wait() + + if process.returncode != 0: + logging.error("Training failed!") + return False + + elapsed_time = time.time() - start_time + logging.info(f"**********************Model training completed ") + logging.info(f"Model training completed in {elapsed_time:.2f} seconds") + return True + +def evaluate_model(args): + """ + Evaluatie the trained model using the evaluate.py script. 
+ + Args: + args: Command line arguments + + Returns: + True if evaluation was successful, False otherwise + """ + logging.info("*************************starting model evaluation...") + + # Path to the trained model + model_checkpoint = os.path.join("experiments", args.exp_name, "best_model_pth") + + if not os.path.exists(model_checkpoint): + logging.error(f"Model checkpoint not found at {model_checkpoint}") + return False + + # Prepare command for evaluation script + cmd = [ + "python", "evaluate.py", + "--exp_name", args.exp_name, + "--model_checkpoint", model_checkpoint, + "--dataset_path", args.dataset_path, + "--num_points", str(args.num_points), + "--num_vis_samples", str(args.num_eval_samples), + "--visualize", # This will now save raw data instead of generating plots + "--dropout", str(args.dropout), + "--emb_dims", str(args.emb_dims), + "--k", str(args.k), + "--output_channels", str(args.output_channels), + "--seed", str(args.seed) + ] + + if args.cache_dir: + cmd.extend(["--cache_dir", args.cache_dir]) + + # Run the evaluation script + env = os.environ.copy() + env["CUDA_VISIBLE_DEVICES"] = args.gpus.split(',')[0] # Use just the first GPU for evaluation + + process = subprocess.Popen(cmd, env=env) + process.wait() + + if process.returncode != 0: + logging.error("Evaluation failed!") + return False + + logging.info("Model evaluation complete") + return True + + +def main(): + """ main function to run the complete pipeline. 
""" + args = parse_args() + + # Set up logging + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + log_dir = os.path.join("logs", args.exp_name) + os.makedirs(log_dir, exist_ok=True) + log_file = os.path.join(log_dir, f"pipeline_{timestamp}.log") + setup_logger(log_file) + + logging.info(f"Starting DrivAerNet pipeline - Experiment: {args.exp_name}") + logging.info(f"Arguments:\n" + pprint.pformat(vars(args), indent=2)) + + # Execute the selected pipeline stages + stages = args.stages.split(',') if ',' in args.stages else [args.stages] + if 'all' in stages: + stages = ['preporcess', 'train', 'evaluate'] + + results = {} + + # Preprocess data if requested + if 'preprocess' in stages: + results['preprocess'] = preprocess_data(args) + else: + # Skip preprocessing but mark as successful to allow training to proceed + results['preprocess'] = True + logging.info("Preprocessing stage skipped.") + + # Train model if requested and preprocessing succeeded + if 'train' in stages and results['preprocess']: + results['train'] = train_model(args) + else: + # If training not requested, mark as successful for evaluation to proceed + if 'train' not in stages: + results['train'] = True + logging.info("Training stage skipped.") + + # Evaluate model if requested and training succeeded + if 'evaluate' in stages and results.get('train', False): + results['evaluate'] = evaluate_model(args) + else: + # If evaluation not requested, mark as true for final success check + if 'evaluate' not in stages: + results['evaluate'] = True + logging.info("Evaluation stage skipped.") + + # Print Summary + logging.info("Pipleline execution complete.") + logging.info("Results summary: ") + for stage, success in results.items(): + status = "Success" if success else "Failed" + logging.info(f" {stage}: {status}") + + # Check if experiment was successful overall +# overall_success = all(results.values()) +# loggiing.info(f"Overall status: {'Success' if overall_success else 'Failed'}") +# return 0 if 
overall_success else 1 + + +if __name__=="__main__": + exit(main()) + diff --git a/RegDGCNN_SurfaceFields/My_python_job/test_metrics.txt b/RegDGCNN_SurfaceFields/My_python_job/test_metrics.txt new file mode 100644 index 0000000..be77c82 --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/test_metrics.txt @@ -0,0 +1,7 @@ +Test MSE: 0.998695 +Test MAE: 0.608575 +Max MAE: 20.926542 +Test R2: 0.1107 +Relative L2 Error: 0.942515 +Relative L1 error: 0.938526 +Total inference time: 0.02s for 48 samples diff --git a/RegDGCNN_SurfaceFields/My_python_job/train.py b/RegDGCNN_SurfaceFields/My_python_job/train.py new file mode 100644 index 0000000..70fafec --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/train.py @@ -0,0 +1,419 @@ +# train.py +import os +import torch +import torch.distributed as dist +import torch.multiprocessing as mp +import torch.nn.functional as F +import torch.optim as optim +from torch.optim.lr_scheduler import ReduceLROnPlateau +import numpy as np +import time +import argparse +import matplotlib.pyplot as plt +from tqdm import tqdm +import logging +import pprint + +# Import modules +from data_loader import get_dataloaders, PRESSURE_MEAN, PRESSURE_STD +from model_pressure import RegDGCNN_pressure +from utils import setup_logger, setup_seed +from colorama import Fore, Style + +def parse_args(): + """Parse command line arguments.""" + parser = argparse.ArgumentParser(description='Train pressure prediction models on DrivAerNet++') + + # Basic settings + parser.add_argument('--exp_name', type=str, default='PressurePrediction', help='Experiment name') + parser.add_argument('--seed', type=int, default=1, help='Random seed') + + # Data settings + parser.add_argument('--dataset_path', type=str, help='Path to dataset') + parser.add_argument('--subset_dir', type=str, help='Path to train/val/test splits') + parser.add_argument('--cache_dir', type=str, help='Path to cache directory') + parser.add_argument('--num_points', type=int, default=10000, 
help='Number of points to sample') + + # Training settings + parser.add_argument('--batch_size', type=int, default=12, help='Batch size per GPU') + parser.add_argument('--epochs', type=int, default=150, help='Number of epochs') + parser.add_argument('--lr', type=float, default=0.001, help='Learning rate') +# parser.add_argument('--test_only', action='store_true', help='Only test the model, no training') + parser.add_argument('--test_only', type=int, default=0, help='Only test the model, no training') + parser.add_argument('--num_workers', type=int, default=4, help='Number of data loading workers') + parser.add_argument('--gpus', type=str, default='0', help='GPUs to use (comma-separated)') + + # Model settings + parser.add_argument('--dropout', type=float, default=0.4, help='Dropout rate') + parser.add_argument('--emb_dims', type=int, default=1024, help='Embedding dimensions') + parser.add_argument('--k', type=int, default=40, help='Number of nearest neighbors') + parser.add_argument('--output_channels', type=int, default=1, help='Number of output channels') + + return parser.parse_args() + +def initialize_model(args, local_rank): + """ Initialize and return the RegDGCN model. 
""" + args = vars(args) + model = RegDGCNN_pressure(args).to(local_rank) + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[local_rank], + find_unused_parameters=True, + output_device=local_rank + ) + return model + +def train_one_epoch(model, train_dataloader, optimizer, criterion, local_rank): + """Train for one epoch.""" + model.train() + total_loss = 0 + + for data, targets in tqdm(train_dataloader, desc="[Training]"): + global PRESSURE_MEAN, PRESSURE_STD + + # Right version + """ + PRESSURE_MEAN = torch.tensor(PRESSURE_MEAN, device=data.device) + PRESSURE_STD = torch.tensor(PRESSURE_STD, device=data.device) + + data, targets = data.squeeze(1).to(local_rank), targets.squeeze(1).to(local_rank) + targets = (targets - PRESSURE_MEAN) / PRESSURE_STD + """ + + # Logic bug version + data = data.squeeze(1).to(local_rank) + targets = targets.squeeze(1).to(local_rank) + + targets = (targets - PRESSURE_MEAN) / PRESSURE_STD + + optimizer.zero_grad() + outputs = model(data) + loss = criterion(outputs.squeeze(1), targets) + + loss.backward() + optimizer.step() + total_loss += loss.item() + + return total_loss / len(train_dataloader) + +def validate(model, val_dataloader, criterion, local_rank): + """ Validate the model""" + model.eval() + total_loss = 0 + + with torch.no_grad(): + for data, targets in tqdm(val_dataloader, desc="[Validation]"): + data = data.squeeze(1).to(local_rank) + targets = targets.squeeze(1).to(local_rank) + targets = (targets - PRESSURE_MEAN) / PRESSURE_STD + + outputs = model(data) + loss = criterion(outputs.squeeze(1), targets) + total_loss += loss.item() + + return total_loss / len(val_dataloader) + +def test_model(model, test_dataloader, criterion, local_rank, exp_dir): + """ Test the model and calculate metrics. 
""" + model.eval() + total_mse, total_mae = 0, 0 + total_rel_l2, total_rel_l1 = 0, 0 + total_inference_time = 0 + total_samples = 0 + all_outputs = [] + all_targets = [] + + with torch.no_grad(): + for data, targets in tqdm(test_dataloader, desc="[Testing]"): + start_time = time.time() + + data, targets = data.squeeze(1).to(local_rank), targets.squeeze(1).to(local_rank) + normalized_targets = (targets - PRESSURE_MEAN) / PRESSURE_STD + + outputs = model(data) + normalized_outputs = outputs.squeeze(1) + + inference_time = time.time() - start_time + total_inference_time += inference_time + + # Calculate metrics + mse = criterion(normalized_outputs, normalized_targets) + mae = F.l1_loss(normalized_outputs, normalized_targets) + + # Calculate relative errors + rel_l2 = torch.mean(torch.norm(normalized_outputs - normalized_targets, p=2, dim=-1) / + torch.norm(normalized_targets, p=2, dim=-1)) + rel_l1 = torch.mean(torch.norm(normalized_outputs - normalized_targets, p=1, dim=-1) / + torch.norm(normalized_targets, p=1, dim=-1)) + + batch_size = targets.size(0) + total_mse += mse.item() * batch_size + total_mae += mae.item() * batch_size + total_rel_l2 += rel_l2.item() * batch_size + total_rel_l1 += rel_l1.item() * batch_size + total_samples += batch_size + + # Store normalized predictions and targets for R² calculation + all_outputs.append(normalized_outputs.cpu()) + all_targets.append(normalized_targets.cpu()) + + # Aggregate results across all processes + total_mse_tensor = torch.tensor(total_mse).to(local_rank) + total_mae_tensor = torch.tensor(total_mae).to(local_rank) + total_rel_l2_tensor = torch.tensor(total_rel_l2).to(local_rank) + total_rel_l1_tensor = torch.tensor(total_rel_l1).to(local_rank) + total_samples_tensor = torch.tensor(total_samples).to(local_rank) + + dist.reduce(total_mse_tensor, dst=0, op=dist.ReduceOp.SUM) + dist.reduce(total_mae_tensor, dst=0, op=dist.ReduceOp.SUM) + dist.reduce(total_rel_l2_tensor, dst=0, op=dist.ReduceOp.SUM) + 
dist.reduce(total_rel_l1_tensor, dst=0, op=dist.ReduceOp.SUM) + dist.reduce(total_samples_tensor, dst=0, op=dist.ReduceOp.SUM) + + # Checkout the value + if dist.get_rank() == 0: + logging.info(f"Total MSE across all processes: {total_mse_tensor.item()}") + + if local_rank ==0: + # Calculate aggregated metrics + avg_mse = total_mse_tensor.item() / total_samples_tensor.item() + avg_mae = total_mae_tensor.item() / total_samples_tensor.item() + avg_rel_l2 = total_rel_l2_tensor.item() / total_samples_tensor.item() + avg_rel_l1 = total_rel_l1_tensor.item() / total_samples_tensor.item() + + # Calculate R² score - only on rank 0 with locally collected data + all_outputs = torch.cat(all_outputs, dim=0).numpy() + all_targets = torch.cat(all_targets, dim=0).numpy() + tmp = np.mean(all_targets) + logging.info("mean value for all_targets: {tmp}") + ss_tot = np.sum((all_targets - np.mean(all_targets)) ** 2) + ss_res = np.sum((all_targets - all_outputs) ** 2) + r_squared = 1 - (ss_res / ss_tot) if ss_tot > 0 else 0 + + # Calculate max AE + max_ae = np.max(np.abs(all_targets - all_outputs)) + logging.info(f"Test MSE: {avg_mse:.6f}, Test MAE: {avg_mae:.6f}, Max AE: {max_ae:.6f}, Test R2: {r_squared:.4f}") + logging.info(f"Relative L2 Error: {avg_rel_l2:.6f}, Relative L1 error: {avg_rel_l1:.6f}") + logging.info(f"Total inference time: {total_inference_time: .2f}s for {total_samples_tensor.item()} samples") + + # Save metrics to a text file + metrics_file = os.path.join(exp_dir, 'test_metrics.txt') + with open(metrics_file, 'w') as f: + f.write(f"Test MSE: {avg_mse:.6f}\n") + f.write(f"Test MAE: {avg_mae:.6f}\n") + f.write(f"Max MAE: {max_ae:.6f}\n") + f.write(f"Test R2: {r_squared:.4f}\n") + f.write(f"Relative L2 Error: {avg_rel_l2:.6f}\n") + f.write(f"Relative L1 error: {avg_rel_l1:.6f}\n") + f.write(f"Total inference time: {total_inference_time: .2f}s for {total_samples_tensor.item()} samples\n") + +def train_and_evaluate(rank, world_size, args): + """ main function for 
Distributed training and evaluation. """ + setup_seed(args.seed) + + # Initialize process group for DDP + dist.init_process_group(backend='nccl', init_method='env://', world_size=world_size, rank=rank) + + local_rank = rank + torch.cuda.set_device(local_rank) + + # Set up logging (only on rank 0) + if local_rank == 0: + exp_dir = os.path.join('experiments', args.exp_name) + os.makedirs(exp_dir, exist_ok=True) + log_file = os.path.join(exp_dir, 'training.log') + setup_logger(log_file) + logging.info(f"args.exp_name : {args.exp_name}") + logging.info(f"Arguments:\n" + pprint.pformat(vars(args), indent=2)) + logging.info(f"{Fore.RED}*******************************Starting training with {world_size} GPUs{Style.RESET_ALL}") + + # Initialize model + model = initialize_model(args, local_rank) + + if local_rank == 0: + total_params = sum(p.numel() for p in model.parameters() if p.requires_grad) + logging.info(f"Total trainable parameters: {total_params}") + + # Prepare DataLoaders + train_dataloader, val_dataloader, test_dataloader = get_dataloaders( + args.dataset_path, args.subset_dir, args.num_points, + args.batch_size, world_size, rank, args.cache_dir, + args.num_workers + ) + + + # Log dataset info + if local_rank == 0: + logging.info( + f"Data loaded: {len(train_dataloader)} training batches, {len(val_dataloader)} validation batches, {len(test_dataloader)} test batches") + + # Set up criterion, optimizer, and scheduler + criterion = torch.nn.MSELoss() + optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4) + scheduler = ReduceLROnPlateau(optimizer, 'min', patience=10, factor=0.1, verbose=True) + + best_model_path = os.path.join('experiments', args.exp_name, 'best_model_pth') + final_model_path = os.path.join('experiments', args.exp_name, 'final_model_pth') + #best_model_path = os.path.join('experiments', args.exp_name, 'best_model_tmp') + #final_model_path = os.path.join('experiments', args.exp_name, 'final_model_tmp') + + # Check if test_only and 
model exists + if args.test_only and os.path.exists(best_model_path): + if local_rank == 0: + logging.info("Loading best model for testing only") + print("Testing the best model:") + model.load_state_dict(torch.load(best_model_path, map_location=f'cuda:{local_rank}')) + test_model(model, test_dataloader, criterion, local_rank, os.path.join('experiments', args.exp_name)) + dist.destroy_process_group() + return + + # Training tracking + best_val_loss = float('inf') + train_losses = [] + val_losses = [] + + if local_rank == 0: + logging.info(f"Staring training for {args.epochs} epochs") + + # Training loop + for epoch in range(args.epochs): + # Set epoch for the DistributedSampler + train_dataloader.sampler.set_epoch(epoch) + + # Training + train_loss = train_one_epoch(model, train_dataloader, optimizer, criterion, local_rank) + + # Validation + val_loss = validate(model, val_dataloader, criterion, local_rank) + + # Record losses. There has a change + if local_rank == 0: + train_losses.append(train_loss) + val_losses.append(val_loss) + logging.info(f"Epoch {epoch + 1}/{args.epochs} - Train Loss: {train_loss:.6f}, Val Loss: {val_loss:.6f}") + + # Save the best model + if val_loss < best_val_loss: + best_val_loss = val_loss + torch.save(model.state_dict(), best_model_path) + logging.info(f"New best model saved with Val Loss: {best_val_loss:.6f}") + + # Update learning rate scheduler + scheduler.step(val_loss) + + # Save progress rate scheduler + if (epoch + 1) % 10 == 0 or epoch == args.epochs - 1: + plt.figure(figsize=(10, 5)) + plt.plot(range(1, epoch + 2), train_losses, label='Training Loss') + plt.plot(range(1, epoch + 2), val_losses, label='Validation Loss') + plt.xlabel('Epoch') + plt.ylabel('Loss') + plt.legend() + plt.title(f'Training Progress - RegDGCNN') + plt.savefig(os.path.join('experiments', args.exp_name, f'training_progress.png')) + plt.close() + + # Save final model + if local_rank == 0: + torch.save(model.state_dict(), final_model_path) + 
logging.info(f"Final model saved to {final_model_path}") + + # Make sure all processes sync up before testing + dist.barrier() + + # Test the final model + if local_rank == 0: + logging.info("Testing the final model") + #test_model(model, test_dataloader, criterion, local_rank, os.path.join('experiments', args.exp_name)) + + # Test the best model + if local_rank == 0: + logging.info("Testing the best model") + model.load_state_dict(torch.load(best_model_path, map_location=f'cuda:{local_rank}')) + #test_model(model, test_dataloader, criterion, local_rank, os.path.join('experiments', args.exp_name)) + + # Clean up + dist.destroy_process_group() +def main(): + """ main function to parse arguments and start training.""" + args = parse_args() + + # Set the master address and port for DDP + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = '12355' + + # Set visible GPUS + gpu_list = args.gpus + os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list + + # Count number of GPUs to use + world_size = len(gpu_list.split(',')) + + # Create experiment directory + exp_dir = os.path.join('experiments', args.exp_name) + os.makedirs(exp_dir, exist_ok=True) + + + # Start distributed training + mp.spawn(train_and_evaluate, args=(world_size, args), nprocs=world_size, join=True) + + +if __name__=="__main__": + main() + +''' + # Checkout the DataLoader object + logging.info(f"Type of train_dataloader: {type(train_dataloader)}") + logging.info(f"Number of train_dataloader: {len(train_dataloader)}") + #logging.info(f"List all methods and attributs of Dataloader: {dir(train_dataloader)}") + logging.info(f"We can access the internal conetnt by dataloader: ") + for ii, (points, pressure)in enumerate(train_dataloader): + logging.info(f"Batch: {ii}") + logging.info(f"Batch.points.shape: {points.shape}") # [2, 1, 3, 10000] + + sample_0 = points[0] # [1, 3, 10000] + sample_1 = points[1] # [1, 3, 10000] + logging.info(f"points_sample_0.shape: {sample_0.shape}") + + sample_0 = 
sample_0.squeeze(0) # [3, 10000] + sample_1 = sample_1.squeeze(0) # [3, 10000] + logging.info(f"points_sample_0.shape: {sample_0.shape}") + + x0 = sample_0[0] + x1 = sample_1[0] + logging.info(f"The first 10 points in x_coor for the sample_0: {x0[:10]}") + logging.info(f"The first 10 points in x_coor for the sample_1: {x1[:10]}") + + logging.info(f"Batch.Pressure.shape: {pressure.shape}") #[2, 1, 10000] + + sample_0 = pressure[0] # [1, 10000] + sample_1 = pressure[1] # [1, 10000] + logging.info(f"pressure_sample_0.shape: {sample_0.shape}") + + sample_0 = sample_0.squeeze(0) # [10000] + sample_1 = sample_1.squeeze(0) # [10000] + logging.info(f"pressure_sample_0.shape: {sample_0.shape}") + + logging.info(f"The first 10 points pressure for the sample_0: {sample_0[:10]}") + logging.info(f"The first 10 points pressure for the sample_1: {sample_1[:10]}") + + + # Checkout the torch.utils.data.subset object + train_subset = train_dataloader.dataset + logging.info(f"Type of train_subset: {type(train_subset)}") + logging.info(f"Number of samples of train_subset : {len(train_subset)}") + logging.info(f"Subset indices: {train_subset.indices[:5]}") + logging.info(f"List the train_subset vtk files:") + for ii, idx in enumerate(train_subset.indices): + vtk_file = train_subset.dataset.vtk_files[idx] + logging.info(f"{ii:3d}: {vtk_file}") + #logging.info(f"List all methods and attributs of subset: {dir(dataset)})") + + + # Checkout the full_dataset i.e. 
all .vtk files + full_dataset = train_subset.dataset + logging.info(f"Type of full_dataset: {type(full_dataset)}") + logging.info(f"Number of samples of full_dataset: {len(full_dataset.vtk_files)}") + for f, ii in enumerate(full_dataset.vtk_files): + logging.info(f" {ii: >2}: {f}") +''' diff --git a/RegDGCNN_SurfaceFields/My_python_job/utils.py b/RegDGCNN_SurfaceFields/My_python_job/utils.py new file mode 100644 index 0000000..41440ca --- /dev/null +++ b/RegDGCNN_SurfaceFields/My_python_job/utils.py @@ -0,0 +1,243 @@ +# utils.py +""" + +@author: Mohamed Elrefaie, mohamed.elrefaie@mit.edu + +Utility functions for the DrivAerNet pressure prediction project. + +This module provides helper functions for logging, random seed setup, +visualization, and other common operations. +""" + +import os +import random +import numpy as np +import torch +import logging +import matplotlib.pyplot as plt +from matplotlib import cm +import pyvista as pv +from data_loader import PRESSURE_MEAN, PRESSURE_STD + + +def setup_seed(seed): + """ + Set the random seed for reproducibility. + + Args: + seed: The random seed to use + """ + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + # For reproducibility + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def setup_logger(log_file=None, level=logging.INFO): + """ + Set up the logger for the application. 
def setup_logger(log_file=None, level=logging.INFO):
    """
    Configure the root logger with a console handler and an optional file handler.

    NOTE(review): the original ``def`` line lies outside this view; the body
    only reads ``log_file`` and ``level``, so this signature is reconstructed —
    confirm it against the full file.

    Args:
        log_file: Path to the log file. If falsy, log only to the console.
        level: Logging level applied to the logger and every handler.
    """
    logger = logging.getLogger()
    logger.setLevel(level)

    # Remove existing handlers to avoid duplicate lines when called repeatedly.
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)

    # Console and file handlers share a single format.
    fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    console_handler = logging.StreamHandler()
    console_handler.setLevel(level)
    console_handler.setFormatter(fmt)
    logger.addHandler(console_handler)

    if log_file:
        # Fix: os.path.dirname() is "" for a bare filename and os.makedirs("")
        # raises FileNotFoundError — only create a directory when one is given.
        log_dir = os.path.dirname(log_file)
        if log_dir:
            os.makedirs(log_dir, exist_ok=True)
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(level)
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)


def visualize_pressure_field(points, true_pressure, pred_pressure, output_path):
    """
    Render side-by-side 3D views of the true and predicted pressure fields.

    Args:
        points: Point-cloud coordinates, shape (N, 3) or (3, N).
        true_pressure: Ground-truth pressure values, length N.
        pred_pressure: Predicted pressure values, length N.
        output_path: Screenshot destination. If an existing directory is
            passed (as some callers do), a default file name is appended.
    """
    # Accept the transposed (3, N) layout by converting to (N, 3).
    # NOTE(review): ambiguous when N == 3; assumed not to occur in practice.
    if points.ndim == 2 and points.shape[0] == 3:
        points = points.T

    # Flatten (N, 1)-style pressure arrays to 1D.
    if true_pressure.ndim > 1:
        true_pressure = true_pressure.squeeze()
    if pred_pressure.ndim > 1:
        pred_pressure = pred_pressure.squeeze()

    # Denormalization intentionally disabled; inputs are used as-is.
    # true_pressure = true_pressure * PRESSURE_STD + PRESSURE_MEAN
    # pred_pressure = pred_pressure * PRESSURE_STD + PRESSURE_MEAN

    # Fix: a sibling script passes a directory here, which would make the
    # screenshot call fail — write a default file name inside it instead.
    if os.path.isdir(output_path):
        output_path = os.path.join(output_path, 'pressure_field.png')

    # One point cloud per field so each subplot carries its own scalars.
    true_cloud = pv.PolyData(points)
    true_cloud.point_data['pressure'] = true_pressure

    pred_cloud = pv.PolyData(points)
    pred_cloud.point_data['pressure'] = pred_pressure

    plotter = pv.Plotter(shape=(1, 2), off_screen=True)

    plotter.subplot(0, 0)
    plotter.add_text("True Pressure", font_size=16)
    plotter.add_mesh(true_cloud, scalars='pressure', cmap='jet', point_size=5)

    plotter.subplot(0, 1)
    plotter.add_text("Predicted Pressure", font_size=16)
    plotter.add_mesh(pred_cloud, scalars='pressure', cmap='jet', point_size=5)

    plotter.screenshot(output_path)
    plotter.close()


def write_pointnet_vtk(points: np.ndarray,
                       pressure: np.ndarray,
                       fname: str = "pressure_cloud.vtk",
                       binary: bool = True,
                       show: bool = True) -> None:
    """
    Save a point cloud with a per-point 'pressure' scalar to a legacy VTK file.

    :param points: (N,3) float array of xyz positions
    :param pressure: (N,) or (N,1) float array per point
    :param fname: Output path for the legacy .vtk file.
    :param binary: Write the file in binary (True) or ASCII (False) form.
    :param show: Attempt an interactive preview before saving (best-effort,
        matching the original behavior); pass False for headless runs.
    """
    assert points.ndim == 2 and points.shape[1] == 3, \
        f"points should be (N,3), got {points.shape}"
    pressure = pressure.reshape(-1)
    assert pressure.shape[0] == points.shape[0], \
        "pressure must have same N as points"

    # Wrap into a PyVista PolyData and attach the per-point scalar array.
    cloud = pv.PolyData(points)
    cloud["pressure"] = pressure

    # Best-effort preview only: a plotting failure (e.g. no display) must not
    # abort the save, so any exception is deliberately swallowed.
    if show:
        try:
            cloud.plot(render_points_as_spheres=True,
                       scalars="pressure",
                       point_size=5,
                       cmap="viridis")
        except Exception:
            pass

    # Save to .vtk (legacy VTK format).
    cloud.save(fname, binary=binary)
    print(f"Saved {points.shape[0]} points with 'pressure' → {fname}")


def plot_error_distribution(true_pressure, pred_pressure, output_path):
    """
    Plot a histogram of the absolute prediction errors.

    Args:
        true_pressure: Ground truth pressure values
        pred_pressure: Predicted pressure values
        output_path: Path to save the plot
    """
    errors = np.abs(true_pressure - pred_pressure)

    plt.figure(figsize=(10, 6))
    plt.hist(errors, bins=50, alpha=0.7)
    plt.xlabel('Absolute Error')
    plt.ylabel('Frequency')
    plt.title('Distribution of Prediction Errors')
    plt.grid(True, alpha=0.3)
    plt.savefig(output_path)
    plt.close()


def save_training_curve(train_losses, val_losses, output_path):
    """
    Save a plot of the training and validation loss curves.

    Args:
        train_losses: List of per-epoch training losses
        val_losses: List of per-epoch validation losses
        output_path: Path to save the plot
    """
    plt.figure(figsize=(10, 5))
    plt.plot(range(1, len(train_losses) + 1), train_losses,
             marker='o', linestyle='-', label='Training Loss')
    plt.plot(range(1, len(val_losses) + 1), val_losses,
             marker='s', linestyle='-', label='Validation Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training and Validation Loss')
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.savefig(output_path)
    plt.close()


def calculate_metrics(true_values, predicted_values):
    """
    Calculate evaluation metrics between ground-truth and predicted values.

    Args:
        true_values: Ground truth values (numpy array or torch tensor)
        predicted_values: Predicted values (numpy array or torch tensor)

    Returns:
        Dict with 'MSE', 'MAE', 'RMSE', 'Max_Error', 'Rel_L2' and 'Rel_L1'.
        The relative errors divide by norms of ``true_values`` and are
        undefined (inf/nan) when the ground truth is identically zero.
    """
    # Move any torch tensors to host numpy arrays first.
    if torch.is_tensor(true_values):
        true_values = true_values.cpu().numpy()
    if torch.is_tensor(predicted_values):
        predicted_values = predicted_values.cpu().numpy()

    diff = true_values - predicted_values

    mse = np.mean(diff ** 2)
    mae = np.mean(np.abs(diff))
    rmse = np.sqrt(mse)
    max_error = np.max(np.abs(diff))

    # For 1D inputs the axis=0 reduction already collapses everything, so the
    # outer mean is a no-op; for 2D inputs it averages per-column ratios.
    rel_l2 = np.mean(np.linalg.norm(diff, axis=0) /
                     np.linalg.norm(true_values, axis=0))
    rel_l1 = np.mean(np.sum(np.abs(diff), axis=0) /
                     np.sum(np.abs(true_values), axis=0))

    return {
        'MSE': mse,
        'MAE': mae,
        'RMSE': rmse,
        'Max_Error': max_error,
        'Rel_L2': rel_l2,
        'Rel_L1': rel_l1
    }
+os.makedirs(output_path, exist_ok=True) + +""" Use PyVista """ +visualize_pressure_field(points, true_p, pred_p, output_path) + +""" Use matplotlib.pyplot """ +fig = plt.figure(figsize=(12, 5)) +ax1 = fig.add_subplot(121, projection='3d') +p1 = ax1.scatter(points[:, 0], points[:, 1], points[:, 2], c=true_p, cmap='jet', s=1) +ax1.set_title('True Pressure', fontsize=14) +fig.colorbar(p1, ax=ax1, shrink=0.5) + +ax2 = fig.add_subplot(122, projection='3d') +p2 = ax2.scatter(points[:, 0], points[:, 1], points[:, 2], c=pred_p, cmap='jet', s=1) +ax2.set_title('Predicted Pressure', fontsize=14) +fig.colorbar(p2, ax=ax2, shrink=0.5) + +""" +error = np.abs(true_p - pred_p) + +fig = plt.figure(figsize=(5, 5)) +ax = fig.add_subplot(111, projection='3d') +p3 = ax.scatter(points[:, 0], points[:, 1], points[:, 2], c=error, cmap='hot') +ax.set_title('Prediction Error') +fig.colorbar(p3, ax=ax) +""" + +plt.tight_layout() +plt.savefig(os.path.join(output_path, "matplotlib_version.png"), dpi=300) +print(f"[Info] Saved to {os.path.join(output_path, 'matplotlib_version.png')}") + + diff --git a/RegDGCNN_SurfaceFields/Pressure_train.lsf b/RegDGCNN_SurfaceFields/Pressure_train.lsf new file mode 100644 index 0000000..55591fc --- /dev/null +++ b/RegDGCNN_SurfaceFields/Pressure_train.lsf @@ -0,0 +1,44 @@ +#!/bin/bash +#BSUB -J DrivAerNet_Pressure_GPU # Job name +#BSUB -q hgx # Queue name (change if needed) +#BSUB -n 1 # Number of CPU cores +#BSUB -R "span[ptile=1]" +#BSUB -gpu "num=1" +#BSUB -oo logs/out_%J.log # Standard output (%J = job ID) +#BSUB -eo logs/err_%J.log # Standard error +#BSUB -env "all" # Export your current environment + +# ------------------------------- +# Load environment and run script +# ------------------------------- + +echo "Starting DrivAerNet training on GPU nodes..."
+echo "Running on host: $(hostname)" +echo "Job ID: $LSB_JOBID" +echo "Requested GPUs: $LSB_GPU_REQ" + +# Activate d2l +source ~/lib/miniconda3/etc/profile.d/conda.sh +conda activate d2l + +# Optional: verify GPU is available +nvidia-smi + +# Remove old logs but keep the ones for this job +find ./logs/ -name '*.log' ! -name "*_${LSB_JOBID}*.log" -delete + +# Run your Preprocess script +# sh Model_Preprocess.sh + +# Run your training script +# sh Model_Training.sh + +# Run your Evaluation script + sh Model_Evaluation.sh + +# Run your Test script +# sh Model_Test.sh + + + + diff --git a/RegDGCNN_SurfaceFields/data_loader.py b/RegDGCNN_SurfaceFields/data_loader.py index 330e241..7c8af9b 100644 --- a/RegDGCNN_SurfaceFields/data_loader.py +++ b/RegDGCNN_SurfaceFields/data_loader.py @@ -200,4 +200,4 @@ def get_dataloaders(dataset_path: str, subset_dir: str, num_points: int, batch_s # Constants for normalization PRESSURE_MEAN = -94.5 -PRESSURE_STD = 117.25 \ No newline at end of file +PRESSURE_STD = 117.25 diff --git a/RegDGCNN_SurfaceFields/evaluate.py b/RegDGCNN_SurfaceFields/evaluate.py index eae160b..dd029c7 100644 --- a/RegDGCNN_SurfaceFields/evaluate.py +++ b/RegDGCNN_SurfaceFields/evaluate.py @@ -24,28 +24,28 @@ def parse_args(): """Parse command line arguments.""" parser = argparse.ArgumentParser(description='Evaluate pressure prediction models on DrivAerNet++') - + # Basic settings parser.add_argument('--exp_name', type=str, required=True, help='Experiment name for results folder') parser.add_argument('--model_checkpoint', type=str, required=True, help='Path to model checkpoint') parser.add_argument('--seed', type=int, default=1, help='Random seed') - + # Data settings parser.add_argument('--dataset_path', type=str, required=True, help='Path to dataset') parser.add_argument('--cache_dir', type=str, help='Path to cache directory') parser.add_argument('--num_points', type=int, default=10000, help='Number of points to sample') parser.add_argument('--sample_ids', 
type=str, help='Path to file with sample IDs to evaluate') - + # Model settings parser.add_argument('--dropout', type=float, default=0.4, help='Dropout rate (for model initialization)') parser.add_argument('--emb_dims', type=int, default=1024, help='Embedding dimensions (for model initialization)') parser.add_argument('--k', type=int, default=40, help='Number of nearest neighbors (for model initialization)') parser.add_argument('--output_channels', type=int, default=1, help='Number of output channels') - + # Visualization settings parser.add_argument('--visualize', action='store_true', help='Generate visualizations') parser.add_argument('--num_vis_samples', type=int, default=5, help='Number of samples to visualize') - + return parser.parse_args() @@ -85,10 +85,10 @@ def initialize_model(args, device): def prepare_dataset(args): """ Prepare the dataset for evaluation. - + Args: args: Command line arguments - + Returns: Prepared dataset and sample indices """ @@ -99,17 +99,17 @@ def prepare_dataset(args): preprocess=False, # We don't need to preprocess if using cached data cache_dir=args.cache_dir ) - + # Determine which samples to evaluate if args.sample_ids: try: with open(args.sample_ids, 'r') as f: sample_ids = [id_.strip() for id_ in f.readlines()] - + # Filter to only include VTK files that match the sample IDs sample_files = [f for f in dataset.vtk_files if any(id_ in f for id_ in sample_ids)] sample_indices = [dataset.vtk_files.index(f) for f in sample_files] - + logging.info(f"Found {len(sample_indices)} samples matching the provided IDs") except Exception as e: logging.error(f"Error loading sample IDs: {e}") @@ -117,11 +117,11 @@ def prepare_dataset(args): else: # Use all samples sample_indices = list(range(len(dataset))) - + # If visualizing, limit to the specified number if args.visualize and args.num_vis_samples < len(sample_indices): sample_indices = sample_indices[:args.num_vis_samples] - + return dataset, sample_indices @@ -239,30 +239,30 @@ def 
main(): """Main function to run the evaluation.""" args = parse_args() setup_seed(args.seed) - + # Set up logging results_dir = os.path.join('results', args.exp_name) os.makedirs(results_dir, exist_ok=True) log_file = os.path.join(results_dir, 'evaluation.log') setup_logger(log_file) - + logging.info(f"Starting evaluation of RegDGCNN model") logging.info(f"Arguments: {args}") - + # Determine device device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") logging.info(f"Using device: {device}") - + # Initialize model model = initialize_model(args, device) model.eval() - + # Prepare dataset dataset, sample_indices = prepare_dataset(args) - + # Evaluate model metrics = evaluate_model(model, dataset, sample_indices, args) - + # Log results logging.info("Evaluation Results:") for metric_name, value in metrics.items(): diff --git a/RegDGCNN_SurfaceFields/experiments/DrivAerNet_Pressure/best_model.pth b/RegDGCNN_SurfaceFields/experiments/DrivAerNet_Pressure/best_model.pth new file mode 100644 index 0000000..7b753b1 Binary files /dev/null and b/RegDGCNN_SurfaceFields/experiments/DrivAerNet_Pressure/best_model.pth differ diff --git a/RegDGCNN_SurfaceFields/experiments/DrivAerNet_Pressure/final_model.pth b/RegDGCNN_SurfaceFields/experiments/DrivAerNet_Pressure/final_model.pth new file mode 100644 index 0000000..44bc06c Binary files /dev/null and b/RegDGCNN_SurfaceFields/experiments/DrivAerNet_Pressure/final_model.pth differ diff --git a/RegDGCNN_SurfaceFields/experiments/DrivAerNet_Pressure/test_metrics.txt b/RegDGCNN_SurfaceFields/experiments/DrivAerNet_Pressure/test_metrics.txt new file mode 100644 index 0000000..0229d40 --- /dev/null +++ b/RegDGCNN_SurfaceFields/experiments/DrivAerNet_Pressure/test_metrics.txt @@ -0,0 +1,7 @@ +Test MSE: 0.083962 +Test MAE: 0.163290 +Max MAE: 14.126799 +R² Score: 0.924995 +Relative L2 Error: 0.273214 +Relative L1 Error: 0.252141 +Total inference time: 0.03s for 54 samples diff --git 
a/RegDGCNN_SurfaceFields/experiments/DrivAerNet_Pressure/training.log b/RegDGCNN_SurfaceFields/experiments/DrivAerNet_Pressure/training.log new file mode 100644 index 0000000..630eb28 --- /dev/null +++ b/RegDGCNN_SurfaceFields/experiments/DrivAerNet_Pressure/training.log @@ -0,0 +1,184 @@ +2025-07-07 15:42:14,469 - INFO - Arguments: Namespace(exp_name='DrivAerNet_Pressure', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', subset_dir='/work/mae-zhangbj/ML_Turbulent/DrivAerNet/train_val_test_splits', cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, batch_size=6, epochs=150, lr=0.001, num_workers=1, test_only=False, gpus='0', dropout=0.4, emb_dims=1024, k=40, output_channels=1) +2025-07-07 15:42:14,474 - INFO - Starting training with 1 GPUs +2025-07-07 15:42:22,749 - INFO - Total trainable parameters: 1437705 +2025-07-07 15:42:22,919 - INFO - Data loaded: 39 training batches, 8 validation batches, 9 test batches +2025-07-07 15:42:22,922 - INFO - Starting training for 150 epochs +2025-07-07 15:42:44,782 - INFO - Epoch 1/150 - Train Loss: 0.765392, Val Loss: 1.117532 +2025-07-07 15:42:44,804 - INFO - New best model saved with Val Loss: 1.117532 +2025-07-07 15:43:02,641 - INFO - Epoch 2/150 - Train Loss: 0.425968, Val Loss: 0.480214 +2025-07-07 15:43:02,656 - INFO - New best model saved with Val Loss: 0.480214 +2025-07-07 15:43:20,481 - INFO - Epoch 3/150 - Train Loss: 0.359811, Val Loss: 0.444877 +2025-07-07 15:43:20,496 - INFO - New best model saved with Val Loss: 0.444877 +2025-07-07 15:43:38,310 - INFO - Epoch 4/150 - Train Loss: 0.312085, Val Loss: 0.590544 +2025-07-07 15:43:56,113 - INFO - Epoch 5/150 - Train Loss: 0.284683, Val Loss: 0.833210 +2025-07-07 15:44:13,942 - INFO - Epoch 6/150 - Train Loss: 0.266364, Val Loss: 1.044873 +2025-07-07 15:44:31,763 - INFO - Epoch 7/150 - Train Loss: 0.244517, Val Loss: 0.458772 +2025-07-07 15:44:49,564 - INFO - Epoch 
8/150 - Train Loss: 0.227804, Val Loss: 1.118219 +2025-07-07 15:45:07,398 - INFO - Epoch 9/150 - Train Loss: 0.222722, Val Loss: 0.292218 +2025-07-07 15:45:07,414 - INFO - New best model saved with Val Loss: 0.292218 +2025-07-07 15:45:25,239 - INFO - Epoch 10/150 - Train Loss: 0.213346, Val Loss: 0.264306 +2025-07-07 15:45:25,254 - INFO - New best model saved with Val Loss: 0.264306 +2025-07-07 15:45:43,297 - INFO - Epoch 11/150 - Train Loss: 0.198541, Val Loss: 1.010461 +2025-07-07 15:46:01,118 - INFO - Epoch 12/150 - Train Loss: 0.188102, Val Loss: 0.402349 +2025-07-07 15:46:18,938 - INFO - Epoch 13/150 - Train Loss: 0.190205, Val Loss: 0.351225 +2025-07-07 15:46:36,740 - INFO - Epoch 14/150 - Train Loss: 0.188799, Val Loss: 0.817092 +2025-07-07 15:46:54,554 - INFO - Epoch 15/150 - Train Loss: 0.181614, Val Loss: 0.227752 +2025-07-07 15:46:54,569 - INFO - New best model saved with Val Loss: 0.227752 +2025-07-07 15:47:12,399 - INFO - Epoch 16/150 - Train Loss: 0.177835, Val Loss: 0.312880 +2025-07-07 15:47:30,190 - INFO - Epoch 17/150 - Train Loss: 0.174542, Val Loss: 0.204217 +2025-07-07 15:47:30,204 - INFO - New best model saved with Val Loss: 0.204217 +2025-07-07 15:47:48,045 - INFO - Epoch 18/150 - Train Loss: 0.171377, Val Loss: 0.178336 +2025-07-07 15:47:48,060 - INFO - New best model saved with Val Loss: 0.178336 +2025-07-07 15:48:05,887 - INFO - Epoch 19/150 - Train Loss: 0.167564, Val Loss: 0.218340 +2025-07-07 15:48:23,708 - INFO - Epoch 20/150 - Train Loss: 0.167842, Val Loss: 0.332284 +2025-07-07 15:48:41,670 - INFO - Epoch 21/150 - Train Loss: 0.163059, Val Loss: 0.209619 +2025-07-07 15:48:59,460 - INFO - Epoch 22/150 - Train Loss: 0.160918, Val Loss: 0.248925 +2025-07-07 15:49:17,300 - INFO - Epoch 23/150 - Train Loss: 0.157175, Val Loss: 0.624631 +2025-07-07 15:49:35,170 - INFO - Epoch 24/150 - Train Loss: 0.156025, Val Loss: 0.170956 +2025-07-07 15:49:35,185 - INFO - New best model saved with Val Loss: 0.170956 +2025-07-07 15:49:53,040 - INFO - 
Epoch 25/150 - Train Loss: 0.153771, Val Loss: 0.321865 +2025-07-07 15:50:10,856 - INFO - Epoch 26/150 - Train Loss: 0.148996, Val Loss: 0.149375 +2025-07-07 15:50:10,871 - INFO - New best model saved with Val Loss: 0.149375 +2025-07-07 15:50:28,684 - INFO - Epoch 27/150 - Train Loss: 0.149616, Val Loss: 0.178216 +2025-07-07 15:50:46,525 - INFO - Epoch 28/150 - Train Loss: 0.154057, Val Loss: 0.161862 +2025-07-07 15:51:04,340 - INFO - Epoch 29/150 - Train Loss: 0.146227, Val Loss: 0.175928 +2025-07-07 15:51:22,167 - INFO - Epoch 30/150 - Train Loss: 0.146135, Val Loss: 0.377274 +2025-07-07 15:51:40,076 - INFO - Epoch 31/150 - Train Loss: 0.145402, Val Loss: 0.213034 +2025-07-07 15:51:57,890 - INFO - Epoch 32/150 - Train Loss: 0.142771, Val Loss: 0.151741 +2025-07-07 15:52:15,700 - INFO - Epoch 33/150 - Train Loss: 0.145088, Val Loss: 0.146101 +2025-07-07 15:52:15,716 - INFO - New best model saved with Val Loss: 0.146101 +2025-07-07 15:52:33,527 - INFO - Epoch 34/150 - Train Loss: 0.139568, Val Loss: 0.273251 +2025-07-07 15:52:51,370 - INFO - Epoch 35/150 - Train Loss: 0.140397, Val Loss: 0.148392 +2025-07-07 15:53:09,232 - INFO - Epoch 36/150 - Train Loss: 0.136741, Val Loss: 0.150139 +2025-07-07 15:53:27,028 - INFO - Epoch 37/150 - Train Loss: 0.135322, Val Loss: 0.148857 +2025-07-07 15:53:44,902 - INFO - Epoch 38/150 - Train Loss: 0.132527, Val Loss: 0.130883 +2025-07-07 15:53:44,917 - INFO - New best model saved with Val Loss: 0.130883 +2025-07-07 15:54:02,744 - INFO - Epoch 39/150 - Train Loss: 0.135555, Val Loss: 0.149993 +2025-07-07 15:54:20,559 - INFO - Epoch 40/150 - Train Loss: 0.135385, Val Loss: 0.164116 +2025-07-07 15:54:38,528 - INFO - Epoch 41/150 - Train Loss: 0.132284, Val Loss: 0.146587 +2025-07-07 15:54:56,338 - INFO - Epoch 42/150 - Train Loss: 0.133476, Val Loss: 0.166374 +2025-07-07 15:55:14,144 - INFO - Epoch 43/150 - Train Loss: 0.132089, Val Loss: 0.138480 +2025-07-07 15:55:31,981 - INFO - Epoch 44/150 - Train Loss: 0.131016, Val Loss: 
0.134061 +2025-07-07 15:55:49,783 - INFO - Epoch 45/150 - Train Loss: 0.131406, Val Loss: 0.128516 +2025-07-07 15:55:49,798 - INFO - New best model saved with Val Loss: 0.128516 +2025-07-07 15:56:07,606 - INFO - Epoch 46/150 - Train Loss: 0.127681, Val Loss: 0.202615 +2025-07-07 15:56:25,456 - INFO - Epoch 47/150 - Train Loss: 0.126269, Val Loss: 0.136413 +2025-07-07 15:56:43,264 - INFO - Epoch 48/150 - Train Loss: 0.128555, Val Loss: 0.211257 +2025-07-07 15:57:01,080 - INFO - Epoch 49/150 - Train Loss: 0.128324, Val Loss: 0.134678 +2025-07-07 15:57:18,926 - INFO - Epoch 50/150 - Train Loss: 0.125648, Val Loss: 0.164913 +2025-07-07 15:57:36,846 - INFO - Epoch 51/150 - Train Loss: 0.124183, Val Loss: 0.121055 +2025-07-07 15:57:36,860 - INFO - New best model saved with Val Loss: 0.121055 +2025-07-07 15:57:54,665 - INFO - Epoch 52/150 - Train Loss: 0.125603, Val Loss: 0.207307 +2025-07-07 15:58:12,504 - INFO - Epoch 53/150 - Train Loss: 0.122962, Val Loss: 0.134787 +2025-07-07 15:58:30,348 - INFO - Epoch 54/150 - Train Loss: 0.122586, Val Loss: 0.152079 +2025-07-07 15:58:48,146 - INFO - Epoch 55/150 - Train Loss: 0.122021, Val Loss: 0.123622 +2025-07-07 15:59:06,000 - INFO - Epoch 56/150 - Train Loss: 0.121857, Val Loss: 0.212121 +2025-07-07 15:59:23,833 - INFO - Epoch 57/150 - Train Loss: 0.123328, Val Loss: 0.150480 +2025-07-07 15:59:41,609 - INFO - Epoch 58/150 - Train Loss: 0.122286, Val Loss: 0.167857 +2025-07-07 15:59:59,403 - INFO - Epoch 59/150 - Train Loss: 0.121922, Val Loss: 0.140145 +2025-07-07 16:00:17,234 - INFO - Epoch 60/150 - Train Loss: 0.117993, Val Loss: 0.169449 +2025-07-07 16:00:35,168 - INFO - Epoch 61/150 - Train Loss: 0.120024, Val Loss: 0.118396 +2025-07-07 16:00:35,183 - INFO - New best model saved with Val Loss: 0.118396 +2025-07-07 16:00:53,004 - INFO - Epoch 62/150 - Train Loss: 0.120212, Val Loss: 0.193534 +2025-07-07 16:01:10,826 - INFO - Epoch 63/150 - Train Loss: 0.121789, Val Loss: 0.137048 +2025-07-07 16:01:28,635 - INFO - Epoch 
64/150 - Train Loss: 0.119106, Val Loss: 0.115732 +2025-07-07 16:01:28,651 - INFO - New best model saved with Val Loss: 0.115732 +2025-07-07 16:01:46,444 - INFO - Epoch 65/150 - Train Loss: 0.116286, Val Loss: 0.150539 +2025-07-07 16:02:04,270 - INFO - Epoch 66/150 - Train Loss: 0.116608, Val Loss: 0.204416 +2025-07-07 16:02:22,087 - INFO - Epoch 67/150 - Train Loss: 0.116302, Val Loss: 0.160922 +2025-07-07 16:02:39,906 - INFO - Epoch 68/150 - Train Loss: 0.120537, Val Loss: 0.140712 +2025-07-07 16:02:57,722 - INFO - Epoch 69/150 - Train Loss: 0.116008, Val Loss: 0.133827 +2025-07-07 16:03:15,547 - INFO - Epoch 70/150 - Train Loss: 0.117500, Val Loss: 0.125367 +2025-07-07 16:03:33,500 - INFO - Epoch 71/150 - Train Loss: 0.115497, Val Loss: 0.111867 +2025-07-07 16:03:33,515 - INFO - New best model saved with Val Loss: 0.111867 +2025-07-07 16:03:51,323 - INFO - Epoch 72/150 - Train Loss: 0.113848, Val Loss: 0.149078 +2025-07-07 16:04:09,134 - INFO - Epoch 73/150 - Train Loss: 0.115463, Val Loss: 0.112712 +2025-07-07 16:04:26,958 - INFO - Epoch 74/150 - Train Loss: 0.112697, Val Loss: 0.117975 +2025-07-07 16:04:44,762 - INFO - Epoch 75/150 - Train Loss: 0.116695, Val Loss: 0.156708 +2025-07-07 16:05:02,569 - INFO - Epoch 76/150 - Train Loss: 0.115100, Val Loss: 0.130494 +2025-07-07 16:05:20,350 - INFO - Epoch 77/150 - Train Loss: 0.112540, Val Loss: 0.115179 +2025-07-07 16:05:38,132 - INFO - Epoch 78/150 - Train Loss: 0.111732, Val Loss: 0.116926 +2025-07-07 16:05:55,933 - INFO - Epoch 79/150 - Train Loss: 0.110727, Val Loss: 0.126542 +2025-07-07 16:06:13,737 - INFO - Epoch 80/150 - Train Loss: 0.111588, Val Loss: 0.117037 +2025-07-07 16:06:31,652 - INFO - Epoch 81/150 - Train Loss: 0.111193, Val Loss: 0.166504 +2025-07-07 16:06:49,411 - INFO - Epoch 82/150 - Train Loss: 0.111560, Val Loss: 0.212419 +2025-07-07 16:07:07,207 - INFO - Epoch 83/150 - Train Loss: 0.103109, Val Loss: 0.092613 +2025-07-07 16:07:07,222 - INFO - New best model saved with Val Loss: 0.092613 
+2025-07-07 16:07:25,013 - INFO - Epoch 84/150 - Train Loss: 0.098936, Val Loss: 0.090915 +2025-07-07 16:07:25,028 - INFO - New best model saved with Val Loss: 0.090915 +2025-07-07 16:07:42,830 - INFO - Epoch 85/150 - Train Loss: 0.097921, Val Loss: 0.090856 +2025-07-07 16:07:42,845 - INFO - New best model saved with Val Loss: 0.090856 +2025-07-07 16:08:00,665 - INFO - Epoch 86/150 - Train Loss: 0.097712, Val Loss: 0.091261 +2025-07-07 16:08:18,461 - INFO - Epoch 87/150 - Train Loss: 0.097985, Val Loss: 0.089322 +2025-07-07 16:08:18,476 - INFO - New best model saved with Val Loss: 0.089322 +2025-07-07 16:08:36,334 - INFO - Epoch 88/150 - Train Loss: 0.097777, Val Loss: 0.090429 +2025-07-07 16:08:54,175 - INFO - Epoch 89/150 - Train Loss: 0.097160, Val Loss: 0.090603 +2025-07-07 16:09:11,965 - INFO - Epoch 90/150 - Train Loss: 0.097306, Val Loss: 0.090642 +2025-07-07 16:09:29,860 - INFO - Epoch 91/150 - Train Loss: 0.096543, Val Loss: 0.092955 +2025-07-07 16:09:47,698 - INFO - Epoch 92/150 - Train Loss: 0.097235, Val Loss: 0.090075 +2025-07-07 16:10:05,548 - INFO - Epoch 93/150 - Train Loss: 0.096742, Val Loss: 0.090382 +2025-07-07 16:10:23,339 - INFO - Epoch 94/150 - Train Loss: 0.095817, Val Loss: 0.090097 +2025-07-07 16:10:41,129 - INFO - Epoch 95/150 - Train Loss: 0.096246, Val Loss: 0.089551 +2025-07-07 16:10:58,937 - INFO - Epoch 96/150 - Train Loss: 0.095710, Val Loss: 0.088552 +2025-07-07 16:10:58,952 - INFO - New best model saved with Val Loss: 0.088552 +2025-07-07 16:11:16,740 - INFO - Epoch 97/150 - Train Loss: 0.096374, Val Loss: 0.089566 +2025-07-07 16:11:34,509 - INFO - Epoch 98/150 - Train Loss: 0.096752, Val Loss: 0.088469 +2025-07-07 16:11:34,524 - INFO - New best model saved with Val Loss: 0.088469 +2025-07-07 16:11:52,339 - INFO - Epoch 99/150 - Train Loss: 0.093261, Val Loss: 0.089550 +2025-07-07 16:12:10,130 - INFO - Epoch 100/150 - Train Loss: 0.095612, Val Loss: 0.096346 +2025-07-07 16:12:28,097 - INFO - Epoch 101/150 - Train Loss: 0.096476, 
Val Loss: 0.128738 +2025-07-07 16:12:45,885 - INFO - Epoch 102/150 - Train Loss: 0.096039, Val Loss: 0.090330 +2025-07-07 16:13:03,704 - INFO - Epoch 103/150 - Train Loss: 0.094943, Val Loss: 0.120177 +2025-07-07 16:13:21,493 - INFO - Epoch 104/150 - Train Loss: 0.095132, Val Loss: 0.088605 +2025-07-07 16:13:39,310 - INFO - Epoch 105/150 - Train Loss: 0.095240, Val Loss: 0.088804 +2025-07-07 16:13:57,171 - INFO - Epoch 106/150 - Train Loss: 0.095847, Val Loss: 0.092196 +2025-07-07 16:14:14,934 - INFO - Epoch 107/150 - Train Loss: 0.095251, Val Loss: 0.097188 +2025-07-07 16:14:32,779 - INFO - Epoch 108/150 - Train Loss: 0.096115, Val Loss: 0.095816 +2025-07-07 16:14:50,617 - INFO - Epoch 109/150 - Train Loss: 0.095379, Val Loss: 0.088785 +2025-07-07 16:15:08,441 - INFO - Epoch 110/150 - Train Loss: 0.094124, Val Loss: 0.086502 +2025-07-07 16:15:08,457 - INFO - New best model saved with Val Loss: 0.086502 +2025-07-07 16:15:26,393 - INFO - Epoch 111/150 - Train Loss: 0.093858, Val Loss: 0.087059 +2025-07-07 16:15:44,203 - INFO - Epoch 112/150 - Train Loss: 0.094762, Val Loss: 0.086803 +2025-07-07 16:16:02,019 - INFO - Epoch 113/150 - Train Loss: 0.094220, Val Loss: 0.086305 +2025-07-07 16:16:02,036 - INFO - New best model saved with Val Loss: 0.086305 +2025-07-07 16:16:19,882 - INFO - Epoch 114/150 - Train Loss: 0.093979, Val Loss: 0.086554 +2025-07-07 16:16:37,665 - INFO - Epoch 115/150 - Train Loss: 0.093502, Val Loss: 0.086534 +2025-07-07 16:16:55,482 - INFO - Epoch 116/150 - Train Loss: 0.093854, Val Loss: 0.086689 +2025-07-07 16:17:13,275 - INFO - Epoch 117/150 - Train Loss: 0.093443, Val Loss: 0.086520 +2025-07-07 16:17:31,061 - INFO - Epoch 118/150 - Train Loss: 0.093456, Val Loss: 0.086879 +2025-07-07 16:17:51,877 - INFO - Epoch 119/150 - Train Loss: 0.093429, Val Loss: 0.086754 +2025-07-07 16:18:13,834 - INFO - Epoch 120/150 - Train Loss: 0.093038, Val Loss: 0.086636 +2025-07-07 16:18:31,841 - INFO - Epoch 121/150 - Train Loss: 0.093188, Val Loss: 0.086384 
+2025-07-07 16:18:49,696 - INFO - Epoch 122/150 - Train Loss: 0.093174, Val Loss: 0.086662 +2025-07-07 16:19:07,478 - INFO - Epoch 123/150 - Train Loss: 0.093458, Val Loss: 0.086432 +2025-07-07 16:19:25,295 - INFO - Epoch 124/150 - Train Loss: 0.093784, Val Loss: 0.086574 +2025-07-07 16:19:43,095 - INFO - Epoch 125/150 - Train Loss: 0.093038, Val Loss: 0.086059 +2025-07-07 16:19:43,112 - INFO - New best model saved with Val Loss: 0.086059 +2025-07-07 16:20:00,893 - INFO - Epoch 126/150 - Train Loss: 0.091458, Val Loss: 0.086364 +2025-07-07 16:20:18,706 - INFO - Epoch 127/150 - Train Loss: 0.092800, Val Loss: 0.086594 +2025-07-07 16:20:36,487 - INFO - Epoch 128/150 - Train Loss: 0.093453, Val Loss: 0.086473 +2025-07-07 16:20:54,304 - INFO - Epoch 129/150 - Train Loss: 0.092883, Val Loss: 0.086414 +2025-07-07 16:21:12,110 - INFO - Epoch 130/150 - Train Loss: 0.093339, Val Loss: 0.086173 +2025-07-07 16:21:29,994 - INFO - Epoch 131/150 - Train Loss: 0.093019, Val Loss: 0.086383 +2025-07-07 16:21:47,808 - INFO - Epoch 132/150 - Train Loss: 0.093502, Val Loss: 0.086444 +2025-07-07 16:22:05,665 - INFO - Epoch 133/150 - Train Loss: 0.093330, Val Loss: 0.086222 +2025-07-07 16:22:23,457 - INFO - Epoch 134/150 - Train Loss: 0.093427, Val Loss: 0.086623 +2025-07-07 16:22:41,277 - INFO - Epoch 135/150 - Train Loss: 0.093817, Val Loss: 0.086278 +2025-07-07 16:22:59,064 - INFO - Epoch 136/150 - Train Loss: 0.093156, Val Loss: 0.086409 +2025-07-07 16:23:16,867 - INFO - Epoch 137/150 - Train Loss: 0.092966, Val Loss: 0.086330 +2025-07-07 16:23:34,646 - INFO - Epoch 138/150 - Train Loss: 0.093484, Val Loss: 0.086202 +2025-07-07 16:23:52,465 - INFO - Epoch 139/150 - Train Loss: 0.092633, Val Loss: 0.086555 +2025-07-07 16:24:10,229 - INFO - Epoch 140/150 - Train Loss: 0.092953, Val Loss: 0.086549 +2025-07-07 16:24:28,103 - INFO - Epoch 141/150 - Train Loss: 0.092855, Val Loss: 0.086349 +2025-07-07 16:24:45,918 - INFO - Epoch 142/150 - Train Loss: 0.093116, Val Loss: 0.086281 
+2025-07-07 16:25:03,675 - INFO - Epoch 143/150 - Train Loss: 0.093006, Val Loss: 0.086377 +2025-07-07 16:25:21,462 - INFO - Epoch 144/150 - Train Loss: 0.092932, Val Loss: 0.086349 +2025-07-07 16:25:39,236 - INFO - Epoch 145/150 - Train Loss: 0.092390, Val Loss: 0.086420 +2025-07-07 16:25:57,048 - INFO - Epoch 146/150 - Train Loss: 0.093185, Val Loss: 0.086400 +2025-07-07 16:26:14,862 - INFO - Epoch 147/150 - Train Loss: 0.093196, Val Loss: 0.086339 +2025-07-07 16:26:32,678 - INFO - Epoch 148/150 - Train Loss: 0.092870, Val Loss: 0.086243 +2025-07-07 16:26:50,521 - INFO - Epoch 149/150 - Train Loss: 0.092973, Val Loss: 0.086341 +2025-07-07 16:27:08,354 - INFO - Epoch 150/150 - Train Loss: 0.093295, Val Loss: 0.086364 +2025-07-07 16:27:08,477 - INFO - Final model saved to experiments/DrivAerNet_Pressure/final_model.pth +2025-07-07 16:27:08,482 - INFO - Testing the final model +2025-07-07 16:27:12,783 - INFO - Testing the best model diff --git a/RegDGCNN_SurfaceFields/experiments/DrivAerNet_Pressure/training_progress.png b/RegDGCNN_SurfaceFields/experiments/DrivAerNet_Pressure/training_progress.png new file mode 100644 index 0000000..0488f8b Binary files /dev/null and b/RegDGCNN_SurfaceFields/experiments/DrivAerNet_Pressure/training_progress.png differ diff --git a/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/aggregated_metrics.npz b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/aggregated_metrics.npz new file mode 100644 index 0000000..cc9170a Binary files /dev/null and b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/aggregated_metrics.npz differ diff --git a/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/evaluation.log b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/evaluation.log new file mode 100644 index 0000000..24ef074 --- /dev/null +++ b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/evaluation.log @@ -0,0 +1,68 @@ +2025-07-07 15:27:15,573 - INFO - Starting evaluation of RegDGCNN model +2025-07-07 15:27:15,576 - INFO - Arguments: 
Namespace(exp_name='DrivAerNet_Pressure', model_checkpoint='experiments/DrivAerNet_Pressure/best_model.pth', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, sample_ids=None, dropout=0.4, emb_dims=1024, k=40, output_channels=1, visualize=True, num_vis_samples=5) +2025-07-07 15:27:15,777 - INFO - Using device: cuda:0 +2025-07-07 15:27:15,961 - INFO - Loading model from experiments/DrivAerNet_Pressure/best_model.pth +2025-07-07 15:27:16,117 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-07 15:27:16,748 - INFO - Saved raw prediction data to results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-07 15:27:16,751 - INFO - Sample: N_S_WWS_WM_292, Max Error: 968.950439, Mean Error: 81.853455, Std Error: 82.278198 +2025-07-07 15:27:16,752 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-07 15:27:16,794 - INFO - Saved raw prediction data to results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-07 15:27:16,794 - INFO - Sample: N_S_WWS_WM_215, Max Error: 1269.971924, Mean Error: 81.675697, Std Error: 77.120552 +2025-07-07 15:27:16,795 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-07 15:27:16,836 - INFO - Saved raw prediction data to results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-07 15:27:16,837 - INFO - Sample: N_S_WWS_WM_073, Max Error: 8266.425781, Mean Error: 89.897110, Std Error: 146.105804 +2025-07-07 15:27:16,837 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-07 15:27:16,879 - INFO - Saved raw prediction data 
to results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-07 15:27:16,879 - INFO - Sample: N_S_WWS_WM_323, Max Error: 1473.806274, Mean Error: 90.924103, Std Error: 90.256660 +2025-07-07 15:27:16,880 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-07 15:27:16,920 - INFO - Saved raw prediction data to results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-07 15:27:16,921 - INFO - Sample: N_S_WWS_WM_240, Max Error: 1177.688843, Mean Error: 88.236313, Std Error: 85.693665 +2025-07-07 15:27:16,923 - INFO - Evaluation complete. Results saved to results/DrivAerNet_Pressure +2025-07-07 15:27:16,923 - INFO - Raw prediction data saved to results/DrivAerNet_Pressure/prediction_data +2025-07-07 15:27:16,925 - INFO - Evaluation Results: +2025-07-07 15:27:16,925 - INFO - MSE: 1.266539 +2025-07-07 15:27:16,925 - INFO - MSE_std: 0.447364 +2025-07-07 15:27:16,925 - INFO - MAE: 0.737888 +2025-07-07 15:27:16,925 - INFO - MAE_std: 0.033899 +2025-07-07 15:27:16,925 - INFO - RMSE: 1.110539 +2025-07-07 15:27:16,925 - INFO - RMSE_std: 0.182324 +2025-07-07 15:27:16,925 - INFO - Max_Error: 22.442377 +2025-07-07 15:27:16,925 - INFO - Max_Error_std: 24.069933 +2025-07-07 15:27:16,925 - INFO - Rel_L2: 9.471090 +2025-07-07 15:27:16,925 - INFO - Rel_L2_std: 3.523058 +2025-07-07 15:27:16,925 - INFO - Rel_L1: 9.471090 +2025-07-07 15:27:16,925 - INFO - Rel_L1_std: 3.523058 +2025-07-08 10:57:42,139 - INFO - Starting evaluation of RegDGCNN model +2025-07-08 10:57:42,144 - INFO - Arguments: Namespace(exp_name='DrivAerNet_Pressure', model_checkpoint='experiments/DrivAerNet_Pressure/best_model.pth', seed=1, dataset_path='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Data_Pressure/PressureVTK/N_S_WWS_WM', cache_dir='/work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data', num_points=10000, sample_ids=None, dropout=0.4, emb_dims=1024, k=40, 
output_channels=1, visualize=True, num_vis_samples=5) +2025-07-08 10:57:42,592 - INFO - Using device: cuda:0 +2025-07-08 10:57:42,829 - INFO - Loading model from experiments/DrivAerNet_Pressure/best_model.pth +2025-07-08 10:57:43,102 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_292.npz +2025-07-08 10:57:43,724 - INFO - Saved raw prediction data to results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_292_prediction_data.npz +2025-07-08 10:57:43,728 - INFO - Sample: N_S_WWS_WM_292, Max Error: 393.243195, Mean Error: 17.564648, Std Error: 25.212502 +2025-07-08 10:57:44,053 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_215.npz +2025-07-08 10:57:44,119 - INFO - Saved raw prediction data to results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_215_prediction_data.npz +2025-07-08 10:57:44,119 - INFO - Sample: N_S_WWS_WM_215, Max Error: 492.861816, Mean Error: 16.419321, Std Error: 24.999308 +2025-07-08 10:57:44,143 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_073.npz +2025-07-08 10:57:44,190 - INFO - Saved raw prediction data to results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_073_prediction_data.npz +2025-07-08 10:57:44,190 - INFO - Sample: N_S_WWS_WM_073, Max Error: 7149.266602, Mean Error: 17.930588, Std Error: 103.619286 +2025-07-08 10:57:44,208 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_323.npz +2025-07-08 10:57:44,267 - INFO - Saved raw prediction data to results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_323_prediction_data.npz +2025-07-08 10:57:44,267 - INFO - Sample: N_S_WWS_WM_323, Max Error: 493.402832, Mean Error: 18.697931, Std Error: 30.359562 +2025-07-08 10:57:44,284 - INFO - Loading cached data from /work/mae-zhangbj/ML_Turbulent/Data_Pressure_Field/Cache_data/N_S_WWS_WM_240.npz +2025-07-08 
10:57:44,352 - INFO - Saved raw prediction data to results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_240_prediction_data.npz +2025-07-08 10:57:44,352 - INFO - Sample: N_S_WWS_WM_240, Max Error: 441.564026, Mean Error: 18.489374, Std Error: 25.603153 +2025-07-08 10:57:44,361 - INFO - Evaluation complete. Results saved to results/DrivAerNet_Pressure +2025-07-08 10:57:44,363 - INFO - Raw prediction data saved to results/DrivAerNet_Pressure/prediction_data +2025-07-08 10:57:44,363 - INFO - Evaluation Results: +2025-07-08 10:57:44,363 - INFO - MSE: 0.220634 +2025-07-08 10:57:44,363 - INFO - MSE_std: 0.292034 +2025-07-08 10:57:44,363 - INFO - MAE: 0.151986 +2025-07-08 10:57:44,363 - INFO - MAE_std: 0.006885 +2025-07-08 10:57:44,363 - INFO - RMSE: 0.397497 +2025-07-08 10:57:44,363 - INFO - RMSE_std: 0.250260 +2025-07-08 10:57:44,363 - INFO - Max_Error: 15.301218 +2025-07-08 10:57:44,363 - INFO - Max_Error_std: 22.838867 +2025-07-08 10:57:44,363 - INFO - Rel_L2: 1.749491 +2025-07-08 10:57:44,363 - INFO - Rel_L2_std: 0.377447 +2025-07-08 10:57:44,363 - INFO - Rel_L1: 1.749491 +2025-07-08 10:57:44,363 - INFO - Rel_L1_std: 0.377447 diff --git a/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/evaluation_metrics.txt b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/evaluation_metrics.txt new file mode 100644 index 0000000..9ca7f2c --- /dev/null +++ b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/evaluation_metrics.txt @@ -0,0 +1,16 @@ +Evaluation Metrics for RegDGCNN +Model Checkpoint: experiments/DrivAerNet_Pressure/best_model.pth +Number of samples: 5 + +MSE: 0.220634 +MSE_std: 0.292034 +MAE: 0.151986 +MAE_std: 0.006885 +RMSE: 0.397497 +RMSE_std: 0.250260 +Max_Error: 15.301218 +Max_Error_std: 22.838867 +Rel_L2: 1.749491 +Rel_L2_std: 0.377447 +Rel_L1: 1.749491 +Rel_L1_std: 0.377447 diff --git a/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_073_prediction_data.npz 
b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_073_prediction_data.npz new file mode 100644 index 0000000..f00e98d Binary files /dev/null and b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_073_prediction_data.npz differ diff --git a/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_215_prediction_data.npz b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_215_prediction_data.npz new file mode 100644 index 0000000..4c5a9b6 Binary files /dev/null and b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_215_prediction_data.npz differ diff --git a/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_240_prediction_data.npz b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_240_prediction_data.npz new file mode 100644 index 0000000..4212f58 Binary files /dev/null and b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_240_prediction_data.npz differ diff --git a/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_292_prediction_data.npz b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_292_prediction_data.npz new file mode 100644 index 0000000..88267ed Binary files /dev/null and b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_292_prediction_data.npz differ diff --git a/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_323_prediction_data.npz b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_323_prediction_data.npz new file mode 100644 index 0000000..6c6097c Binary files /dev/null and b/RegDGCNN_SurfaceFields/results/DrivAerNet_Pressure/prediction_data/N_S_WWS_WM_323_prediction_data.npz differ diff --git a/RegDGCNN_SurfaceFields/run_pipeline.py b/RegDGCNN_SurfaceFields/run_pipeline.py index 
7cb09da..78c2589 100644 --- a/RegDGCNN_SurfaceFields/run_pipeline.py +++ b/RegDGCNN_SurfaceFields/run_pipeline.py @@ -43,7 +43,7 @@ def parse_args(): parser.add_argument('--epochs', type=int, default=150, help='Number of epochs') parser.add_argument('--lr', type=float, default=0.001, help='Learning rate') parser.add_argument('--num_workers', type=int, default=4, help='Number of data loading workers') - parser.add_argument('--gpus', type=str, default='0,1,2,3', help='GPUs to use (comma-separated)') + parser.add_argument('--gpus', type=str, default="0", help='GPUs to use (comma-separated)') # Model settings parser.add_argument('--dropout', type=float, default=0.4, help='Dropout rate') @@ -263,4 +263,4 @@ def main(): if __name__ == "__main__": - exit(main()) \ No newline at end of file + exit(main()) diff --git a/RegDGCNN_SurfaceFields/tail.sh b/RegDGCNN_SurfaceFields/tail.sh new file mode 100644 index 0000000..66d22c8 --- /dev/null +++ b/RegDGCNN_SurfaceFields/tail.sh @@ -0,0 +1,2 @@ +#!/bin/bash +tail -f ./logs/err_* diff --git a/RegDGCNN_SurfaceFields/test_metrics.txt b/RegDGCNN_SurfaceFields/test_metrics.txt new file mode 100644 index 0000000..a55769d --- /dev/null +++ b/RegDGCNN_SurfaceFields/test_metrics.txt @@ -0,0 +1,7 @@ +Test MSE: 0.265175 +Test MAE: 0.325761 +Max MAE: 19.508324 +R² Score: 0.763116 +Relative L2 Error: 0.481119 +Relative L1 Error: 0.503007 +Total inference time: 0.11s for 54 samples diff --git a/RegDGCNN_SurfaceFields/train.py b/RegDGCNN_SurfaceFields/train.py index 715e77d..581275a 100644 --- a/RegDGCNN_SurfaceFields/train.py +++ b/RegDGCNN_SurfaceFields/train.py @@ -46,7 +46,7 @@ def parse_args(): parser.add_argument('--lr', type=float, default=0.001, help='Learning rate') parser.add_argument('--num_workers', type=int, default=4, help='Number of data loading workers') parser.add_argument('--test_only', action='store_true', help='Only test the model, no training') - parser.add_argument('--gpus', type=str, default='0,1,2,3', help='GPUs to use 
(comma-separated)') + parser.add_argument('--gpus', type=str, default="0", help='GPUs to use (comma-separated)') # Model settings parser.add_argument('--dropout', type=float, default=0.4, help='Dropout rate') @@ -203,6 +203,7 @@ def train_and_evaluate(rank, world_size, args): dist.init_process_group(backend='nccl', init_method='env://', world_size=world_size, rank=rank) local_rank = rank + logging.info(f"local_rank: {local_rank}") torch.cuda.set_device(local_rank) # Set up logging (only on rank 0) @@ -353,4 +354,4 @@ def main(): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/error.py b/error.py new file mode 100644 index 0000000..776693b --- /dev/null +++ b/error.py @@ -0,0 +1,39 @@ +import numpy as np +import matplotlib.pyplot as plt + +# Spatial resolution test (for alpha) +dx = np.array([0.1, 0.05, 0.025, 0.0125]) +e_dx = np.array([1.5e-2, 3.8e-3, 9.5e-4, 2.4e-4]) + +# Time resolution test (for beta) +dt = np.array([0.01, 0.005, 0.0025, 0.00125]) +e_dt = np.array([2.0e-2, 5.0e-3, 1.25e-3, 3.1e-4]) + +# Plotting log-log error vs dx +plt.figure(figsize=(12, 5)) + +plt.subplot(1, 2, 1) +plt.loglog(dx, e_dx, 'o-', label='Error vs dx') +z = np.polyfit(np.log(dx), np.log(e_dx), 1) +plt.loglog(dx, np.exp(z[1]) * dx ** z[0], '--', label=f"Slope ≈ {z[0]:.2f}") +plt.xlabel('Δx') +plt.ylabel('Error') +plt.title('Spatial Convergence Rate (α)') +plt.legend() +plt.grid(True, which="both", ls="--") + +# Plotting log-log error vs dt +plt.subplot(1, 2, 2) +plt.loglog(dt, e_dt, 'o-', label='Error vs dt') +z = np.polyfit(np.log(dt), np.log(e_dt), 1) +plt.loglog(dt, np.exp(z[1]) * dt ** z[0], '--', label=f"Slope ≈ {z[0]:.2f}") +plt.xlabel('Δt') +plt.ylabel('Error') +plt.title('Temporal Convergence Rate (β)') +plt.legend() +plt.grid(True, which="both", ls="--") + +plt.tight_layout() +plt.savefig("error_convergence.png", dpi=300) +plt.show() + diff --git a/error_convergence.png b/error_convergence.png new file mode 100644 index 0000000..c084be5 
Binary files /dev/null and b/error_convergence.png differ diff --git a/foo b/foo new file mode 100644 index 0000000..e69de29