diff --git a/PyTorch/built-in/foundation/CogVLM/cogvlm_utils/finetune_cogvlm_base_224.sh b/PyTorch/built-in/foundation/CogVLM/cogvlm_utils/finetune_cogvlm_base_224.sh
index c9f825c48c6eebd6f3827b61113b411bc00b36a5..40e9e8a06341ef2083ef1f440cba91564ef63d1a 100644
--- a/PyTorch/built-in/foundation/CogVLM/cogvlm_utils/finetune_cogvlm_base_224.sh
+++ b/PyTorch/built-in/foundation/CogVLM/cogvlm_utils/finetune_cogvlm_base_224.sh
@@ -45,7 +45,7 @@ gpt_options=" \
        --split 1. \
        --deepspeed_config test_config_bf16.json \
        --skip-init \
-       --seed 1234
+       --seed 1234 > train_cogvlm.log 2>&1 &
 "
 
 
diff --git a/PyTorch/built-in/foundation/MiniGPT-4/test/finetune_gpt_1p.sh b/PyTorch/built-in/foundation/MiniGPT-4/test/finetune_gpt_1p.sh
index 2e1ade809230f59df846253e060d3b3d3a35d6bb..bbd7e04462f2d38532b0b5fc13c4223658b3ace4 100755
--- a/PyTorch/built-in/foundation/MiniGPT-4/test/finetune_gpt_1p.sh
+++ b/PyTorch/built-in/foundation/MiniGPT-4/test/finetune_gpt_1p.sh
@@ -13,4 +13,4 @@ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
 
 DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
 OPTIONS="run.max_epoch=2 run.iters_per_epoch=240 run.batch_size_train=10 run.batch_size_eval=10 "
-torchrun $DISTRIBUTED_ARGS train.py --cfg-path train_configs/minigpt4_stage2_finetune.yaml --options ${OPTIONS}
+torchrun $DISTRIBUTED_ARGS train.py --cfg-path train_configs/minigpt4_stage2_finetune.yaml --options ${OPTIONS} > minigpt4_finetune.log 2>&1 &
diff --git a/PyTorch/built-in/foundation/MiniGPT-4/test/pretrain_gpt_4p.sh b/PyTorch/built-in/foundation/MiniGPT-4/test/pretrain_gpt_4p.sh
index f53589b2be454eab474292a66353fc7c7e72f69b..10976fdcd2d7ca8b9c893ae4dc126c684d185146 100755
--- a/PyTorch/built-in/foundation/MiniGPT-4/test/pretrain_gpt_4p.sh
+++ b/PyTorch/built-in/foundation/MiniGPT-4/test/pretrain_gpt_4p.sh
@@ -14,4 +14,4 @@ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
 
 DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
 OPTIONS="run.max_epoch=4 run.iters_per_epoch=8000 run.batch_size_train=40 run.batch_size_eval=40 "
-torchrun $DISTRIBUTED_ARGS train.py --cfg-path train_configs/minigpt4_stage1_pretrain.yaml --options ${OPTIONS}
+torchrun $DISTRIBUTED_ARGS train.py --cfg-path train_configs/minigpt4_stage1_pretrain.yaml --options ${OPTIONS} > minigpt4_pretrain.log 2>&1 &