1

发布时间 2023-03-23 20:15:25　作者: helloWorldhelloWorld
23-03-18 16:30:58.219 - INFO:   name: LOLv1_model
use_tb_logger: True
model: video_base4_m
distortion: sr
scale: 1
gpu_ids: cuda:0
datasets:[
train:[
name: indoor
mode: video_samesize_lol
interval_list: [1]
random_reverse: False
border_mode: False
dataroot_GT: /media/mmsys/6f1091c9-4ed8-4a10-a03d-2acef144d2e1/SXY/Data/LOL/LOL-v1/our485/high
dataroot_LQ: /media/mmsys/6f1091c9-4ed8-4a10-a03d-2acef144d2e1/SXY/Data/LOL/LOL-v1/our485/low
train_size: [600, 400]
cache_keys: None
cache_data: True
padding: new_info
N_frames: 5
use_shuffle: True
n_workers: 3
batch_size: 32
GT_size: 128
LQ_size: 128
use_flip: True
use_rot: True
color: RGB
phase: train
scale: 1
data_type: img
]
val:[
name: REDS4
mode: video_samesize_lol
dataroot_GT: path_to_datasets/LOL-v1/our485/high
dataroot_LQ: path_to_datasets/LOL-v1/our485/low
cache_data: True
N_frames: 5
padding: new_info
train_size: [600, 400]
phase: val
scale: 1
data_type: img
]
]
network_G:[
which_model_G: low_light_transformer
nf: 64
nframes: 5
groups: 8
front_RBs: 1
back_RBs: 1
predeblur: True
HR_in: True
w_TSA: True
scale: 1
]
path:[
root: ./
strict_load: False
experiments_root: ./experiments/LOLv1_model
models: ./experiments/LOLv1_model/models
training_state: ./experiments/LOLv1_model/training_state
log: ./experiments/LOLv1_model
val_images: ./experiments/LOLv1_model/val_images
]
train:[
lr_G: 0.0004
lr_scheme: MultiStepLR
beta1: 0.9
beta2: 0.99
niter: 600000
ft_tsa_only: 0
warmup_iter: -1
lr_steps: [50000, 100000, 200000, 300000]
lr_gamma: 0.5
eta_min: 1e-07
pixel_criterion: cb
pixel_weight: 1.0
val_freq: 5000.0
manual_seed: 0
]
logger:[
print_freq: 100
save_checkpoint_freq: 5000.0
]
is_train: True
dist: False

23-03-18 16:30:58.338 - INFO: Random seed: 3882
23-03-18 16:30:58.358 - INFO: Dataset [VideoSameSizeDataset - indoor] is created.
23-03-18 16:30:58.359 - INFO: Number of train images: 485, iters: 16
23-03-18 16:30:58.359 - INFO: Total epochs needed: 37500 for iters 600,000
23-03-18 16:30:58.359 - INFO: Dataset [VideoSameSizeDataset - REDS4] is created.
23-03-18 16:30:58.359 - INFO: Number of val images in [REDS4]: 0
23-03-18 16:31:00.388 - INFO: Network G structure: DataParallel - low_light_transformer, with parameters: 39,124,099
23-03-18 16:31:00.388 - INFO: low_light_transformer(
(conv_first_1): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv_first_2): Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
(conv_first_3): Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
(feature_extraction): Sequential(
(0): ResidualBlock_noBN(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
)
(recon_trunk): Sequential(
(0): ResidualBlock_noBN(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
)
(upconv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(upconv2): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(pixel_shuffle): PixelShuffle(upscale_factor=2)
(HRconv): Conv2d(128, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv_last): Conv2d(64, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(lrelu): LeakyReLU(negative_slope=0.1, inplace=True)
(transformer): Encoder_patch66(
(dropout): Dropout(p=0.0, inplace=False)
(layer_stack): ModuleList(
(0): EncoderLayer3(
(slf_attn): MultiHeadAttention4(
(w_qs): Linear(in_features=1024, out_features=512, bias=False)
(w_ks): Linear(in_features=1024, out_features=512, bias=False)
(w_vs): Linear(in_features=1024, out_features=512, bias=False)
(fc): Linear(in_features=512, out_features=1024, bias=False)
(attention): ScaledDotProductAttention(
(dropout): Dropout(p=0.0, inplace=False)
)
(dropout): Dropout(p=0.0, inplace=False)
(layer_norm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
)
(pos_ffn): PositionwiseFeedForward4(
(w_1): Linear(in_features=1024, out_features=2048, bias=True)
(w_2): Linear(in_features=2048, out_features=1024, bias=True)
(layer_norm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
(dropout): Dropout(p=0.0, inplace=False)
)
)
(1): EncoderLayer3(
(slf_attn): MultiHeadAttention4(
(w_qs): Linear(in_features=1024, out_features=512, bias=False)
(w_ks): Linear(in_features=1024, out_features=512, bias=False)
(w_vs): Linear(in_features=1024, out_features=512, bias=False)
(fc): Linear(in_features=512, out_features=1024, bias=False)
(attention): ScaledDotProductAttention(
(dropout): Dropout(p=0.0, inplace=False)
)
(dropout): Dropout(p=0.0, inplace=False)
(layer_norm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
)
(pos_ffn): PositionwiseFeedForward4(
(w_1): Linear(in_features=1024, out_features=2048, bias=True)
(w_2): Linear(in_features=2048, out_features=1024, bias=True)
(layer_norm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
(dropout): Dropout(p=0.0, inplace=False)
)
)
(2): EncoderLayer3(
(slf_attn): MultiHeadAttention4(
(w_qs): Linear(in_features=1024, out_features=512, bias=False)
(w_ks): Linear(in_features=1024, out_features=512, bias=False)
(w_vs): Linear(in_features=1024, out_features=512, bias=False)
(fc): Linear(in_features=512, out_features=1024, bias=False)
(attention): ScaledDotProductAttention(
(dropout): Dropout(p=0.0, inplace=False)
)
(dropout): Dropout(p=0.0, inplace=False)
(layer_norm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
)
(pos_ffn): PositionwiseFeedForward4(
(w_1): Linear(in_features=1024, out_features=2048, bias=True)
(w_2): Linear(in_features=2048, out_features=1024, bias=True)
(layer_norm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
(dropout): Dropout(p=0.0, inplace=False)
)
)
(3): EncoderLayer3(
(slf_attn): MultiHeadAttention4(
(w_qs): Linear(in_features=1024, out_features=512, bias=False)
(w_ks): Linear(in_features=1024, out_features=512, bias=False)
(w_vs): Linear(in_features=1024, out_features=512, bias=False)
(fc): Linear(in_features=512, out_features=1024, bias=False)
(attention): ScaledDotProductAttention(
(dropout): Dropout(p=0.0, inplace=False)
)
(dropout): Dropout(p=0.0, inplace=False)
(layer_norm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
)
(pos_ffn): PositionwiseFeedForward4(
(w_1): Linear(in_features=1024, out_features=2048, bias=True)
(w_2): Linear(in_features=2048, out_features=1024, bias=True)
(layer_norm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
(dropout): Dropout(p=0.0, inplace=False)
)
)
(4): EncoderLayer3(
(slf_attn): MultiHeadAttention4(
(w_qs): Linear(in_features=1024, out_features=512, bias=False)
(w_ks): Linear(in_features=1024, out_features=512, bias=False)
(w_vs): Linear(in_features=1024, out_features=512, bias=False)
(fc): Linear(in_features=512, out_features=1024, bias=False)
(attention): ScaledDotProductAttention(
(dropout): Dropout(p=0.0, inplace=False)
)
(dropout): Dropout(p=0.0, inplace=False)
(layer_norm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
)
(pos_ffn): PositionwiseFeedForward4(
(w_1): Linear(in_features=1024, out_features=2048, bias=True)
(w_2): Linear(in_features=2048, out_features=1024, bias=True)
(layer_norm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
(dropout): Dropout(p=0.0, inplace=False)
)
)
(5): EncoderLayer3(
(slf_attn): MultiHeadAttention4(
(w_qs): Linear(in_features=1024, out_features=512, bias=False)
(w_ks): Linear(in_features=1024, out_features=512, bias=False)
(w_vs): Linear(in_features=1024, out_features=512, bias=False)
(fc): Linear(in_features=512, out_features=1024, bias=False)
(attention): ScaledDotProductAttention(
(dropout): Dropout(p=0.0, inplace=False)
)
(dropout): Dropout(p=0.0, inplace=False)
(layer_norm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
)
(pos_ffn): PositionwiseFeedForward4(
(w_1): Linear(in_features=1024, out_features=2048, bias=True)
(w_2): Linear(in_features=2048, out_features=1024, bias=True)
(layer_norm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
(dropout): Dropout(p=0.0, inplace=False)
)
)
)
)
(recon_trunk_light): Sequential(
(0): ResidualBlock_noBN(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(1): ResidualBlock_noBN(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(2): ResidualBlock_noBN(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(3): ResidualBlock_noBN(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(4): ResidualBlock_noBN(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(5): ResidualBlock_noBN(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
)
)
23-03-18 16:31:00.815 - INFO: Model [VideoBaseModel] is created.
23-03-18 16:31:00.816 - INFO: Start training from epoch: 0, iter: 0