#!/bin/bash
# run_models.sh
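# Runs src/eval_bench.py once per model listed in model_paths, saving results
# under the matching entry in file_names.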

# export HF_HOME=/apdcephfs_sh2/share_300000800/user/zongxia/hf_cache
# export TRANSFORMERS_CACHE=/apdcephfs_sh2/share_300000800/user/zongxia/hf_cache

./move_eval.sh

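# Model checkpoints to evaluate; commented-out entries are kept for reference.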
model_paths=(
    # "Qwen/Qwen2.5-VL-3B-Instruct"
    # "/apdcephfs_sh2/share_300000800/user/zongxia/Video-R1/src/r1-v/log/3B-Video-GRPO-NoDesEval/checkpoint-1000"
    # "/apdcephfs_sh2/share_300000800/user/zongxia/Video-R1/src/r1-v/log/3B-Video-GRPO-selfEval-ThenNoDesEval/pool_numerical_chunk_02/checkpoint-42"
    # "/apdcephfs_sh2/share_300000800/user/zongxia/Video-R1/src/r1-v/log/3B-Video-GRPO-AnswerBERT/video_pool_multiple_choice_chunk_02/checkpoint-46"
    # "Video-R1/Video-R1-7B"
    "zli12321/VideoHallu-R1-v3"
    # "Qwen/Qwen2.5-VL-7B-Instruct"
)

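# Output file name passed to eval_bench.py for each model above (same order).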
file_names=(
    # "qwen_3B_base"
    # "qwen_3B_noDesEval"
    # "qwen_3B_answerBERT_thenNoDesEval"
    # "qwen_3B_answerBERT_video12"
    # "video-R1-7B"
    "VideoHallu-R1-v3"
    # "Qwen2.5-VL-7B-Instruct"
)

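# Give decord extra retries on EOF errors when decoding long videos.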
export DECORD_EOF_RETRY_MAX=20480


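# Optional guard (not in the original script): keep the two arrays in sync,
# one output file name per model path.
if [ "${#model_paths[@]}" -ne "${#file_names[@]}" ]; then
    echo "model_paths and file_names must have the same length" >&2
    exit 1
fi

# Evaluate each model in turn on GPUs 0-3.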
for i in "${!model_paths[@]}"; do
    model="${model_paths[$i]}"
    file_name="${file_names[$i]}"
    CUDA_VISIBLE_DEVICES=0,1,2,3 python ./src/eval_bench.py --model_path "$model" --file_name "$file_name"
done